From pypy.commits at gmail.com Fri Apr 1 02:39:51 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 31 Mar 2016 23:39:51 -0700 (PDT) Subject: [pypy-commit] pypy default: is_jit_debug takes the integer opnum as argument Message-ID: <56fe17b7.c818c20a.d27e2.3721@mx.google.com> Author: Richard Plangger Branch: Changeset: r83474:7e0e8c6d8985 Date: 2016-04-01 08:38 +0200 http://bitbucket.org/pypy/pypy/changeset/7e0e8c6d8985/ Log: is_jit_debug takes the integer opnum as argument diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -33,7 +33,7 @@ def ensure_can_hold_constants(self, asm, op): # allocates 8 bytes in memory for pointers, long integers or floats - if rop.is_jit_debug(op): + if rop.is_jit_debug(op.getopnum()): return for arg in op.getarglist(): From pypy.commits at gmail.com Fri Apr 1 02:42:20 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 31 Mar 2016 23:42:20 -0700 (PDT) Subject: [pypy-commit] pypy default: an attempt to fix translation Message-ID: <56fe184c.41e11c0a.d0074.6558@mx.google.com> Author: fijal Branch: Changeset: r83475:42bd4abc3cf8 Date: 2016-04-01 08:41 +0200 http://bitbucket.org/pypy/pypy/changeset/42bd4abc3cf8/ Log: an attempt to fix translation diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -181,9 +181,10 @@ elif space.is_w(space.type(w_obj), space.w_float): jit.promote(space.float_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_str): - jit.promote(space.str_w(w_obj)) + jit.promote_string(space.unicode_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_unicode): - jit.promote(space.unicode_w(w_obj)) + raise OperationError(space.w_TypeError, space.wrap( + "promoting unicode unsupported")) else: jit.promote(w_obj) return w_obj From pypy.commits at gmail.com Fri Apr 1 02:42:22 
2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 31 Mar 2016 23:42:22 -0700 (PDT) Subject: [pypy-commit] pypy default: merge Message-ID: <56fe184e.a3abc20a.a4e35.3c95@mx.google.com> Author: fijal Branch: Changeset: r83476:22530b29f323 Date: 2016-04-01 08:41 +0200 http://bitbucket.org/pypy/pypy/changeset/22530b29f323/ Log: merge diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -33,7 +33,7 @@ def ensure_can_hold_constants(self, asm, op): # allocates 8 bytes in memory for pointers, long integers or floats - if rop.is_jit_debug(op): + if rop.is_jit_debug(op.getopnum()): return for arg in op.getarglist(): From pypy.commits at gmail.com Fri Apr 1 03:44:46 2016 From: pypy.commits at gmail.com (devin.jeanpierre) Date: Fri, 01 Apr 2016 00:44:46 -0700 (PDT) Subject: [pypy-commit] pypy default: Make attrgetter a single type -- this brings PyPy in line with CPython. Message-ID: <56fe26ee.41d91c0a.353e8.556b@mx.google.com> Author: Devin Jeanpierre Branch: Changeset: r83477:a13a0320f6f3 Date: 2016-03-31 16:15 -0700 http://bitbucket.org/pypy/pypy/changeset/a13a0320f6f3/ Log: Make attrgetter a single type -- this brings PyPy in line with CPython. e.g. code that does "if type(x) is operator.attrgetter:" now works. (I'm sorry.) Tested with stupid benchmarks and there is no significant difference. 
e.g.: (in pypy/pypy/module/operator): $ pypy -m timeit -s 'import operator; a = operator.attrgetter("real"); items = range(10000)' 'for x in items: a(x)' 10000 loops, best of 3: 71.7 usec per loop $ pypy -m timeit -s 'import app_operator as operator; a = operator.attrgetter("real"); items = range(10000)' 'for x in items: a(x)' 10000 loops, best of 3: 66.7 usec per loop $ pypy -m timeit -s 'import app_operator as operator; a = operator.attrgetter("real"); item = 1' 'a(1)' 100000000 loops, best of 3: 0.0073 usec per loop $ pypy -m timeit -s 'import operator; a = operator.attrgetter("real"); item = 1' 'a(1)' 100000000 loops, best of 3: 0.00694 usec per loop $ pypy -m timeit -s 'import app_operator as operator; a = operator.attrgetter("real.real"); items = range(10000)' 'for x in items: a(x)' 1000 loops, best of 3: 501 usec per loop $ pypy -m timeit -s 'import operator; a = operator.attrgetter("real.real"); items = range(10000)' 'for x in items: a(x)' 1000 loops, best of 3: 504 usec per loop $ pypy -m timeit -s 'import operator; a = operator.attrgetter("real.real", "real"); items = range(10000)' 'for x in items: a(x)' 1000 loops, best of 3: 1.74 msec per loop $ pypy -m timeit -s 'import app_operator as operator; a = operator.attrgetter("real.real", "real"); items = range(10000)' 'for x in items: a(x)' 1000 loops, best of 3: 1.82 msec per loop diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -79,54 +79,45 @@ else: return _resolve_attr_chain(chain, obj, idx + 1) - -class _simple_attrgetter(object): - def __init__(self, attr): - self._attr = attr +class attrgetter(object): + def __init__(self, attr, *attrs): + if ( + not isinstance(attr, basestring) or + not all(isinstance(a, basestring) for a in attrs) + ): + def _raise_typeerror(obj): + raise TypeError( + "argument must be a string, not %r" % type(attr).__name__ + ) + self._call = 
_raise_typeerror + elif attrs: + self._multi_attrs = [ + a.split(".") for a in [attr] + list(attrs) + ] + self._call = self._multi_attrgetter + elif "." not in attr: + self._simple_attr = attr + self._call = self._simple_attrgetter + else: + self._single_attr = attr.split(".") + self._call = self._single_attrgetter def __call__(self, obj): - return getattr(obj, self._attr) + return self._call(obj) + def _simple_attrgetter(self, obj): + return getattr(obj, self._simple_attr) -class _single_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs + def _single_attrgetter(self, obj): + return _resolve_attr_chain(self._single_attr, obj) - def __call__(self, obj): - return _resolve_attr_chain(self._attrs, obj) - - -class _multi_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs - - def __call__(self, obj): + def _multi_attrgetter(self, obj): return tuple([ _resolve_attr_chain(attrs, obj) - for attrs in self._attrs + for attrs in self._multi_attrs ]) -def attrgetter(attr, *attrs): - if ( - not isinstance(attr, basestring) or - not all(isinstance(a, basestring) for a in attrs) - ): - def _raise_typeerror(obj): - raise TypeError( - "argument must be a string, not %r" % type(attr).__name__ - ) - return _raise_typeerror - if attrs: - return _multi_attrgetter([ - a.split(".") for a in [attr] + list(attrs) - ]) - elif "." 
not in attr: - return _simple_attrgetter(attr) - else: - return _single_attrgetter(attr.split(".")) - - class itemgetter(object): def __init__(self, item, *items): self._single = not bool(items) From pypy.commits at gmail.com Fri Apr 1 03:44:48 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Apr 2016 00:44:48 -0700 (PDT) Subject: [pypy-commit] pypy default: Add more tests Message-ID: <56fe26f0.a151c20a.f09b0.5054@mx.google.com> Author: Armin Rigo Branch: Changeset: r83478:ea5f0b463c5d Date: 2016-04-01 09:44 +0200 http://bitbucket.org/pypy/pypy/changeset/ea5f0b463c5d/ Log: Add more tests diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -47,7 +47,13 @@ a.name = "hello" a.child = A() a.child.name = "world" + a.child.foo = "bar" assert attrgetter("child.name")(a) == "world" + assert attrgetter("child.name", "child.foo")(a) == ("world", "bar") + + def test_attrgetter_type(self): + from operator import attrgetter + assert type(attrgetter("child.name")) is attrgetter def test_concat(self): class Seq1: From pypy.commits at gmail.com Fri Apr 1 03:44:50 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Apr 2016 00:44:50 -0700 (PDT) Subject: [pypy-commit] pypy default: merge heads Message-ID: <56fe26f2.10921c0a.12120.ffff8903@mx.google.com> Author: Armin Rigo Branch: Changeset: r83479:96c8916a4c64 Date: 2016-04-01 09:44 +0200 http://bitbucket.org/pypy/pypy/changeset/96c8916a4c64/ Log: merge heads diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -181,9 +181,10 @@ elif space.is_w(space.type(w_obj), space.w_float): jit.promote(space.float_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_str): - jit.promote(space.str_w(w_obj)) + jit.promote_string(space.unicode_w(w_obj)) elif 
space.is_w(space.type(w_obj), space.w_unicode): - jit.promote(space.unicode_w(w_obj)) + raise OperationError(space.w_TypeError, space.wrap( + "promoting unicode unsupported")) else: jit.promote(w_obj) return w_obj diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -33,7 +33,7 @@ def ensure_can_hold_constants(self, asm, op): # allocates 8 bytes in memory for pointers, long integers or floats - if rop.is_jit_debug(op): + if rop.is_jit_debug(op.getopnum()): return for arg in op.getarglist(): From pypy.commits at gmail.com Fri Apr 1 03:46:33 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Apr 2016 00:46:33 -0700 (PDT) Subject: [pypy-commit] pypy default: remove unused dict Message-ID: <56fe2759.2a6ec20a.20d85.500c@mx.google.com> Author: Armin Rigo Branch: Changeset: r83480:ce466388b1be Date: 2016-04-01 09:45 +0200 http://bitbucket.org/pypy/pypy/changeset/ce466388b1be/ Log: remove unused dict diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -279,7 +279,6 @@ self._bigints = [] self._bigints_dict = {} self._floats = [] - self._floats_dict = {} self._snapshots = [] for i, inparg in enumerate(inputargs): inparg.set_position(i) @@ -305,7 +304,6 @@ self._bigints_dict = {} self._refs_dict = llhelper.new_ref_dict_3() - self._floats_dict = {} debug_start("jit-trace-done") debug_print("trace length: " + str(self._pos)) debug_print(" total snapshots: " + str(self._total_snapshots)) From pypy.commits at gmail.com Fri Apr 1 03:52:36 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Apr 2016 00:52:36 -0700 (PDT) Subject: [pypy-commit] pypy default: Add a warning to the docstring Message-ID: <56fe28c4.c50a1c0a.7964e.ffff8bd2@mx.google.com> Author: Armin Rigo Branch: Changeset: r83481:5731bf1c66b3 Date: 2016-04-01 09:52 +0200 
http://bitbucket.org/pypy/pypy/changeset/5731bf1c66b3/ Log: Add a warning to the docstring diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -172,7 +172,7 @@ def _promote(space, w_obj): """ Promote the first argument of the function and return it. Promote is by value for ints, floats, strs, unicodes (but not subclasses thereof) and by - reference otherwise. + reference otherwise. (Unicodes not supported right now.) This function is experimental!""" from rpython.rlib import jit From pypy.commits at gmail.com Fri Apr 1 03:54:25 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 01 Apr 2016 00:54:25 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: merged default, translation issue Message-ID: <56fe2931.890bc30a.f2c19.5335@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83482:132688540925 Date: 2016-04-01 09:53 +0200 http://bitbucket.org/pypy/pypy/changeset/132688540925/ Log: merged default, translation issue diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -18,6 +18,7 @@ - ``bytebuffer(length)``: return a new read-write buffer of the given length. It works like a simplified array of characters (actually, depending on the configuration the ``array`` module internally uses this). + - ``attach_gdb()``: start a GDB at the interpreter-level (or a PDB before translation). Transparent Proxy Functionality @@ -37,4 +38,3 @@ -------------------------------------------------------- - ``isfake(obj)``: returns True if ``obj`` is faked. - - ``interp_pdb()``: start a pdb at interpreter-level. 
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -90,6 +90,7 @@ 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', 'decode_long' : 'interp_magic.decode_long', + '_promote' : 'interp_magic._promote', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -168,3 +168,22 @@ except InvalidEndiannessError: raise oefmt(space.w_ValueError, "invalid byteorder argument") return space.newlong_from_rbigint(result) + +def _promote(space, w_obj): + """ Promote the first argument of the function and return it. Promote is by + value for ints, floats, strs, unicodes (but not subclasses thereof) and by + reference otherwise. + + This function is experimental!""" + from rpython.rlib import jit + if space.is_w(space.type(w_obj), space.w_int): + jit.promote(space.int_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_float): + jit.promote(space.float_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_str): + jit.promote(space.str_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_unicode): + jit.promote(space.unicode_w(w_obj)) + else: + jit.promote(w_obj) + return w_obj diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -47,3 +47,16 @@ assert decode_long('\x00\x80', 'little', False) == 32768 assert decode_long('\x00\x80', 'little', True) == -32768 raises(ValueError, decode_long, '', 'foo') + + def test_promote(self): + from __pypy__ import _promote + assert _promote(1) == 1 + assert _promote(1.1) == 1.1 + assert _promote("abc") == "abc" + assert _promote(u"abc") == 
u"abc" + l = [] + assert _promote(l) is l + class A(object): + pass + a = A() + assert _promote(a) is a diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -534,7 +534,7 @@ looptoken._ll_function_addr = rawstart if logger: log = logger.log_trace(MARK_TRACE_ASM, None, self.mc) - log.write(inputargs, operations, None, ops_offset, unique_id=unique_id) + log.write(inputargs, operations, None, ops_offset=ops_offset, unique_id=unique_id) self.fixup_target_tokens(rawstart) self.teardown() # oprofile support @@ -647,15 +647,28 @@ pass elif gloc is not bloc: self.mov(gloc, bloc) + offset = self.mc.get_relative_pos() self.mc.JMP_l(0) + self.mc.writeimm32(0) self.mc.force_frame_size(DEFAULT_FRAME_BYTES) - offset = self.mc.get_relative_pos() - 4 rawstart = self.materialize_loop(looptoken) - # update the jump to the real trace - self._patch_jump_for_descr(rawstart + offset, asminfo.rawstart) + # update the jump (above) to the real trace + self._patch_jump_to(rawstart + offset, asminfo.rawstart) # update the guard to jump right to this custom piece of assembler self.patch_jump_for_descr(faildescr, rawstart) + def _patch_jump_to(self, adr_jump_offset, adr_new_target): + assert adr_jump_offset != 0 + offset = adr_new_target - (adr_jump_offset + 5) + mc = codebuf.MachineCodeBlockWrapper() + mc.force_frame_size(DEFAULT_FRAME_BYTES) + if rx86.fits_in_32bits(offset): + mc.JMP_l(offset) + else: + mc.MOV_ri(X86_64_SCRATCH_REG.value, adr_new_target) + mc.JMP_r(X86_64_SCRATCH_REG.value) + mc.copy_to_raw_memory(adr_jump_offset) + def write_pending_failure_recoveries(self, regalloc): # for each pending guard, generate the code of the recovery stub # at the end of self.mc. 
@@ -793,10 +806,6 @@ def patch_jump_for_descr(self, faildescr, adr_new_target): adr_jump_offset = faildescr.adr_jump_offset - self._patch_jump_for_descr(adr_jump_offset, adr_new_target) - faildescr.adr_jump_offset = 0 # means "patched" - - def _patch_jump_for_descr(self, adr_jump_offset, adr_new_target): assert adr_jump_offset != 0 offset = adr_new_target - (adr_jump_offset + 4) # If the new target fits within a rel32 of the jump, just patch @@ -817,6 +826,7 @@ p = rffi.cast(rffi.INTP, adr_jump_offset) adr_target = adr_jump_offset + 4 + rffi.cast(lltype.Signed, p[0]) mc.copy_to_raw_memory(adr_target) + faildescr.adr_jump_offset = 0 # means "patched" def fixup_target_tokens(self, rawstart): for targettoken in self.target_tokens_currently_compiling: diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -33,7 +33,7 @@ def ensure_can_hold_constants(self, asm, op): # allocates 8 bytes in memory for pointers, long integers or floats - if rop.is_jit_debug(op): + if rop.is_jit_debug(op.getopnum()): return for arg in op.getarglist(): diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -478,7 +478,7 @@ def do_compile_loop(jd_id, unique_id, metainterp_sd, inputargs, operations, looptoken, log=True, name='', memo=None): _log = metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, metainterp_sd, None) - _log.write(inputargs, operations, name=name, unique_id=unique_id) + _log.write(inputargs, operations, None, name=name, unique_id=unique_id) # TODO remove old metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, 'compiling', None, name, memo) @@ -491,7 +491,7 @@ def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, original_loop_token, log=True, memo=None): _log = metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, metainterp_sd, None) - 
_log.write(inputargs, operations, faildescr=faildescr) + _log.write(inputargs, operations, faildescr) # TODO remove old metainterp_sd.logger_ops.log_bridge(inputargs, operations, "compiling", memo=memo) diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -55,7 +55,7 @@ def log_trace(self, tag, metainterp_sd, mc, memo=None): if self.cintf.jitlog_filter(tag): - return + return EMPTY_TRACE_LOG assert isinstance(tag, int) if memo is None: memo = {} @@ -94,8 +94,13 @@ chr((val >> 48) & 0xff), chr((val >> 56)& 0xff)]) +class BaseLogTrace(object): + def write(self, args, ops, faildescr=None, ops_offset={}, name=None, unique_id=0): + return None -class LogTrace(object): +EMPTY_TRACE_LOG = BaseLogTrace() + +class LogTrace(BaseLogTrace): def __init__(self, tag, memo, metainterp_sd, mc, logger): self.memo = memo self.metainterp_sd = metainterp_sd @@ -107,15 +112,15 @@ self.logger = logger def write(self, args, ops, faildescr=None, ops_offset={}, - name=None, unique_id=None): + name=None, unique_id=0): log = self.logger - if not name: + if name is None: name = '' # write the initial tag if faildescr is None: string = self.logger.encode_str('loop') + \ - self.logger.encode_le_addr(unique_id or 0) + \ + self.logger.encode_le_addr(unique_id) + \ self.logger.encode_str(name or '') log.write_marked(self.tag, string) else: diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -578,7 +578,6 @@ n -= 1 return res # - py.test.raises(InvalidVirtualRef, "fn(10)") py.test.raises(UnknownException, "self.meta_interp(fn, [10])") def test_call_virtualref_already_forced(self): From pypy.commits at gmail.com Fri Apr 1 04:24:15 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Apr 2016 01:24:15 -0700 (PDT) Subject: 
[pypy-commit] pypy default: Another translation fix attempt Message-ID: <56fe302f.aa0ac20a.7e388.5ad3@mx.google.com> Author: Armin Rigo Branch: Changeset: r83483:648874ef8243 Date: 2016-04-01 09:30 +0100 http://bitbucket.org/pypy/pypy/changeset/648874ef8243/ Log: Another translation fix attempt diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -181,7 +181,7 @@ elif space.is_w(space.type(w_obj), space.w_float): jit.promote(space.float_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_str): - jit.promote_string(space.unicode_w(w_obj)) + jit.promote_string(space.str_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_unicode): raise OperationError(space.w_TypeError, space.wrap( "promoting unicode unsupported")) From pypy.commits at gmail.com Fri Apr 1 04:58:26 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 01 Apr 2016 01:58:26 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: refactoring the jitlog, inlines encode methods, finish_once of cpu now prints enter count of bridges, entries and loops Message-ID: <56fe3832.654fc20a.2c843.6c76@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83484:992293e7c5b7 Date: 2016-04-01 10:56 +0200 http://bitbucket.org/pypy/pypy/changeset/992293e7c5b7/ Log: refactoring the jitlog, inlines encode methods, finish_once of cpu now prints enter count of bridges,entries and loops diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -352,7 +352,7 @@ self.loop_run_counters.append(struct) return struct - def finish_once(self): + def finish_once(self, jitlog): if self._debug: debug_start('jit-backend-counts') for i in range(len(self.loop_run_counters)): @@ -372,6 +372,11 @@ debug_print(prefix + ':' + str(struct.i)) 
debug_stop('jit-backend-counts') + if jitlog: + # this is always called, the jitlog knows if it is enabled + for i, struct in enumerate(self.loop_run_counters): + jitlog.log_jit_counter(struct) + @staticmethod @rgc.no_collect def _reacquire_gil_asmgcc(css, old_rpy_fastgil): diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -65,8 +65,8 @@ self.assembler.setup_once() @rgc.no_release_gil - def finish_once(self): - self.assembler.finish_once() + def finish_once(self, jitlog=None): + self.assembler.finish_once(jitlog) self.profile_agent.shutdown() def dump_loop_token(self, looptoken): diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -3,7 +3,7 @@ from rpython.jit.metainterp.history import ConstInt, ConstFloat from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rlib.objectmodel import compute_unique_id +from rpython.rlib.objectmodel import compute_unique_id, always_inline import sys import weakref @@ -25,8 +25,42 @@ # the machine code was patched (e.g. 
guard) MARK_STITCH_BRIDGE = 0x19 +MARK_JIT_LOOP_COUNTER = 0x20 +MARK_JIT_BRIDGE_COUNTER = 0x21 +MARK_JIT_ENTRY_COUNTER = 0x22 + IS_32_BIT = sys.maxint == 2**31-1 + at always_inline +def encode_str(string): + return encode_le_32bit(len(string)) + string + + at always_inline +def encode_le_16bit(val): + return chr((val >> 0) & 0xff) + chr((val >> 8) & 0xff) + + at always_inline +def encode_le_32bit(val): + return ''.join([chr((val >> 0) & 0xff), + chr((val >> 8) & 0xff), + chr((val >> 16) & 0xff), + chr((val >> 24) & 0xff)]) + + at always_inline +def encode_le_addr(val): + if IS_32_BIT: + return encode_be_32bit(val) + else: + return ''.join([chr((val >> 0) & 0xff), + chr((val >> 8) & 0xff), + chr((val >> 16) & 0xff), + chr((val >> 24) & 0xff), + chr((val >> 32) & 0xff), + chr((val >> 40) & 0xff), + chr((val >> 48) & 0xff), + chr((val >> 56)& 0xff)]) + + class VMProfJitLogger(object): def __init__(self): @@ -37,24 +71,40 @@ def setup_once(self): self.is_setup = True self.cintf.jitlog_try_init_using_env() - if self.cintf.jitlog_filter(0x0): + if not self.cintf.jitlog_enabled(): return count = len(resoperations.opname) mark = MARK_RESOP_META for opnum, opname in resoperations.opname.items(): - line = self.encode_le_16bit(opnum) + self.encode_str(opname.lower()) - self.write_marked(mark, line) + line = encode_le_16bit(opnum) + encode_str(opname.lower()) + self._write_marked(mark, line) def teardown(self): self.cintf.jitlog_teardown() - def write_marked(self, mark, line): + def _write_marked(self, mark, line): + if not we_are_translated(): + assert self.cintf.jitlog_enabled() if not self.is_setup: self.setup_once() self.cintf.jitlog_write_marked(mark, line, len(line)) + def log_jit_counter(self, struct): + if not self.cintf.jitlog_enabled(): + return EMPTY_TRACE_LOG + le_addr = encode_le_addr(struct.number) + # not an address (but a number) but it is a machine word + le_count = encode_le_addr(struct.i) + if struct.type == 'l': + tag = MARK_JIT_LOOP_COUNTER + elif 
struct.type == 'b': + tag = MARK_JIT_BRIDGE_COUNTER + else: + tag = MARK_JIT_ENTRY_COUNTER + self._write_marked(tag, le_addr + le_count) + def log_trace(self, tag, metainterp_sd, mc, memo=None): - if self.cintf.jitlog_filter(tag): + if not self.cintf.jitlog_enabled(): return EMPTY_TRACE_LOG assert isinstance(tag, int) if memo is None: @@ -62,37 +112,12 @@ return LogTrace(tag, memo, metainterp_sd, mc, self) def log_patch_guard(self, descr_number, addr): - if self.cintf.jitlog_filter(MARK_STITCH_BRIDGE): + if not self.cintf.jitlog_enabled(): return - le_descr_number = self.encode_le_addr(descr_number) - le_addr = self.encode_le_addr(addr) + le_descr_number = encode_le_addr(descr_number) + le_addr = encode_le_addr(addr) lst = [le_descr_number, le_addr] - self.write_marked(MARK_STITCH_BRIDGE, ''.join(lst)) - - def encode_str(self, string): - return self.encode_le_32bit(len(string)) + string - - def encode_le_16bit(self, val): - return chr((val >> 0) & 0xff) + chr((val >> 8) & 0xff) - - def encode_le_32bit(self, val): - return ''.join([chr((val >> 0) & 0xff), - chr((val >> 8) & 0xff), - chr((val >> 16) & 0xff), - chr((val >> 24) & 0xff)]) - - def encode_le_addr(self,val): - if IS_32_BIT: - return encode_be_32bit(val) - else: - return ''.join([chr((val >> 0) & 0xff), - chr((val >> 8) & 0xff), - chr((val >> 16) & 0xff), - chr((val >> 24) & 0xff), - chr((val >> 32) & 0xff), - chr((val >> 40) & 0xff), - chr((val >> 48) & 0xff), - chr((val >> 56)& 0xff)]) + self._write_marked(MARK_STITCH_BRIDGE, ''.join(lst)) class BaseLogTrace(object): def write(self, args, ops, faildescr=None, ops_offset={}, name=None, unique_id=0): @@ -119,33 +144,33 @@ name = '' # write the initial tag if faildescr is None: - string = self.logger.encode_str('loop') + \ - self.logger.encode_le_addr(unique_id) + \ - self.logger.encode_str(name or '') - log.write_marked(self.tag, string) + string = encode_str('loop') + \ + encode_le_addr(unique_id) + \ + encode_str(name or '') + log._write_marked(self.tag, 
string) else: unique_id = compute_unique_id(faildescr) - string = self.logger.encode_str('bridge') + \ - self.logger.encode_le_addr(unique_id) + \ - self.logger.encode_str(name or '') - log.write_marked(self.tag, string) + string = encode_str('bridge') + \ + encode_le_addr(unique_id) + \ + encode_str(name or '') + log._write_marked(self.tag, string) # input args str_args = [self.var_to_str(arg) for arg in args] - string = self.logger.encode_str(','.join(str_args)) - log.write_marked(MARK_INPUT_ARGS, string) + string = encode_str(','.join(str_args)) + log._write_marked(MARK_INPUT_ARGS, string) # assembler address (to not duplicate it in write_code_dump) if self.mc is not None: absaddr = self.mc.absolute_addr() rel = self.mc.get_relative_pos() # packs as two unsigend longs - le_addr1 = self.logger.encode_le_addr(absaddr) - le_addr2 = self.logger.encode_le_addr(absaddr + rel) - log.write_marked(MARK_ASM_ADDR, le_addr1 + le_addr2) + le_addr1 = encode_le_addr(absaddr) + le_addr2 = encode_le_addr(absaddr + rel) + log._write_marked(MARK_ASM_ADDR, le_addr1 + le_addr2) for i,op in enumerate(ops): mark, line = self.encode_op(op) - log.write_marked(mark, line) + log._write_marked(mark, line) self.write_core_dump(ops, i, op, ops_offset) self.memo = {} @@ -160,18 +185,18 @@ """ str_args = [self.var_to_str(arg) for arg in op.getarglist()] descr = op.getdescr() - le_opnum = self.logger.encode_le_16bit(op.getopnum()) + le_opnum = encode_le_16bit(op.getopnum()) str_res = self.var_to_str(op) line = ','.join([str_res] + str_args) if descr: descr_str = descr.repr_of_descr() line = line + ',' + descr_str - string = self.logger.encode_str(line) + string = encode_str(line) descr_number = compute_unique_id(descr) - le_descr_number = self.logger.encode_le_addr(descr_number) + le_descr_number = encode_le_addr(descr_number) return MARK_RESOP_DESCR, le_opnum + string + le_descr_number else: - string = self.logger.encode_str(line) + string = encode_str(line) return MARK_RESOP, le_opnum + 
string @@ -208,9 +233,9 @@ count = end_offset - start_offset dump = self.mc.copy_core_dump(self.mc.absolute_addr(), start_offset, count) - offset = self.logger.encode_le_16bit(start_offset) - edump = self.logger.encode_str(dump) - self.logger.write_marked(MARK_ASM, offset + edump) + offset = encode_le_16bit(start_offset) + edump = encode_str(dump) + self.logger._write_marked(MARK_ASM, offset + edump) def var_to_str(self, arg): try: diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -1040,7 +1040,8 @@ def finish(): if self.metainterp_sd.profiler.initialized: self.metainterp_sd.profiler.finish() - self.metainterp_sd.cpu.finish_once() + jitlog = self.metainterp_sd.jitlog + self.metainterp_sd.cpu.finish_once(jitlog) if self.cpu.translate_support_code: call_final_function(self.translator, finish, diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -64,7 +64,7 @@ jitlog_write_marked = rffi.llexternal("jitlog_write_marked", [rffi.INT, rffi.CCHARP, rffi.INT], lltype.Void, compilation_info=eci) - jitlog_filter = rffi.llexternal("jitlog_filter", [rffi.INT], rffi.INT, + jitlog_enabled = rffi.llexternal("jitlog_enabled", [], rffi.INT, compilation_info=eci) jitlog_teardown = rffi.llexternal("jitlog_teardown", [], lltype.Void, compilation_info=eci) diff --git a/rpython/rlib/rvmprof/src/jitlog_main.h b/rpython/rlib/rvmprof/src/jitlog_main.h --- a/rpython/rlib/rvmprof/src/jitlog_main.h +++ b/rpython/rlib/rvmprof/src/jitlog_main.h @@ -9,9 +9,9 @@ static int jitlog_ready = 0; RPY_EXTERN -int jitlog_filter(int tag) +int jitlog_enabled() { - return 0; // TODO + return jitlog_ready; } RPY_EXTERN diff --git a/rpython/rlib/rvmprof/src/rvmprof.h b/rpython/rlib/rvmprof/src/rvmprof.h --- a/rpython/rlib/rvmprof/src/rvmprof.h +++ b/rpython/rlib/rvmprof/src/rvmprof.h @@ -11,6 
+11,6 @@ RPY_EXTERN char * jitlog_init(int, const char*); RPY_EXTERN void jitlog_try_init_using_env(void); -RPY_EXTERN int jitlog_filter(int); +RPY_EXTERN int jitlog_enabled(); RPY_EXTERN void jitlog_write_marked(int, char*, int); RPY_EXTERN void jitlog_teardown(); From pypy.commits at gmail.com Fri Apr 1 04:58:27 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 01 Apr 2016 01:58:27 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: catchup default Message-ID: <56fe3833.aaf8c20a.d6370.ffffd5a6@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83485:6f53abf601b1 Date: 2016-04-01 10:57 +0200 http://bitbucket.org/pypy/pypy/changeset/6f53abf601b1/ Log: catchup default diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -172,7 +172,7 @@ def _promote(space, w_obj): """ Promote the first argument of the function and return it. Promote is by value for ints, floats, strs, unicodes (but not subclasses thereof) and by - reference otherwise. + reference otherwise. (Unicodes not supported right now.) 
This function is experimental!""" from rpython.rlib import jit @@ -181,9 +181,10 @@ elif space.is_w(space.type(w_obj), space.w_float): jit.promote(space.float_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_str): - jit.promote(space.str_w(w_obj)) + jit.promote_string(space.str_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_unicode): - jit.promote(space.unicode_w(w_obj)) + raise OperationError(space.w_TypeError, space.wrap( + "promoting unicode unsupported")) else: jit.promote(w_obj) return w_obj diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -79,54 +79,45 @@ else: return _resolve_attr_chain(chain, obj, idx + 1) - -class _simple_attrgetter(object): - def __init__(self, attr): - self._attr = attr +class attrgetter(object): + def __init__(self, attr, *attrs): + if ( + not isinstance(attr, basestring) or + not all(isinstance(a, basestring) for a in attrs) + ): + def _raise_typeerror(obj): + raise TypeError( + "argument must be a string, not %r" % type(attr).__name__ + ) + self._call = _raise_typeerror + elif attrs: + self._multi_attrs = [ + a.split(".") for a in [attr] + list(attrs) + ] + self._call = self._multi_attrgetter + elif "." 
not in attr: + self._simple_attr = attr + self._call = self._simple_attrgetter + else: + self._single_attr = attr.split(".") + self._call = self._single_attrgetter def __call__(self, obj): - return getattr(obj, self._attr) + return self._call(obj) + def _simple_attrgetter(self, obj): + return getattr(obj, self._simple_attr) -class _single_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs + def _single_attrgetter(self, obj): + return _resolve_attr_chain(self._single_attr, obj) - def __call__(self, obj): - return _resolve_attr_chain(self._attrs, obj) - - -class _multi_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs - - def __call__(self, obj): + def _multi_attrgetter(self, obj): return tuple([ _resolve_attr_chain(attrs, obj) - for attrs in self._attrs + for attrs in self._multi_attrs ]) -def attrgetter(attr, *attrs): - if ( - not isinstance(attr, basestring) or - not all(isinstance(a, basestring) for a in attrs) - ): - def _raise_typeerror(obj): - raise TypeError( - "argument must be a string, not %r" % type(attr).__name__ - ) - return _raise_typeerror - if attrs: - return _multi_attrgetter([ - a.split(".") for a in [attr] + list(attrs) - ]) - elif "." 
not in attr: - return _simple_attrgetter(attr) - else: - return _single_attrgetter(attr.split(".")) - - class itemgetter(object): def __init__(self, item, *items): self._single = not bool(items) diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -47,7 +47,13 @@ a.name = "hello" a.child = A() a.child.name = "world" + a.child.foo = "bar" assert attrgetter("child.name")(a) == "world" + assert attrgetter("child.name", "child.foo")(a) == ("world", "bar") + + def test_attrgetter_type(self): + from operator import attrgetter + assert type(attrgetter("child.name")) is attrgetter def test_concat(self): class Seq1: diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -279,7 +279,6 @@ self._bigints = [] self._bigints_dict = {} self._floats = [] - self._floats_dict = {} self._snapshots = [] for i, inparg in enumerate(inputargs): inparg.set_position(i) @@ -305,7 +304,6 @@ self._bigints_dict = {} self._refs_dict = llhelper.new_ref_dict_3() - self._floats_dict = {} debug_start("jit-trace-done") debug_print("trace length: " + str(self._pos)) debug_print(" total snapshots: " + str(self._total_snapshots)) From pypy.commits at gmail.com Fri Apr 1 06:05:03 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 01 Apr 2016 03:05:03 -0700 (PDT) Subject: [pypy-commit] pypy default: backout the merge of the branch we need to think more about Message-ID: <56fe47cf.034cc20a.40b40.ffff8470@mx.google.com> Author: fijal Branch: Changeset: r83486:56c0228015a1 Date: 2016-04-01 12:01 +0200 http://bitbucket.org/pypy/pypy/changeset/56c0228015a1/ Log: backout the merge of the branch we need to think more about diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ 
b/pypy/interpreter/error.py @@ -277,9 +277,18 @@ raise NotImplementedError def get_traceback(self): - """Get the PyTraceback object, for app-level Python code. + """Calling this marks the PyTraceback as escaped, i.e. it becomes + accessible and inspectable by app-level Python code. For the JIT. + Note that this has no effect if there are already several traceback + frames recorded, because in this case they are already marked as + escaping by executioncontext.leave() being called with + got_exception=True. """ - return self._application_traceback + from pypy.interpreter.pytraceback import PyTraceback + tb = self._application_traceback + if tb is not None and isinstance(tb, PyTraceback): + tb.frame.mark_as_escaped() + return tb def set_traceback(self, traceback): """Set the current traceback.""" diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -74,6 +74,15 @@ finally: frame_vref = self.topframeref self.topframeref = frame.f_backref + if frame.escaped or got_exception: + # if this frame escaped to applevel, we must ensure that also + # f_back does + f_back = frame.f_backref() + if f_back: + f_back.mark_as_escaped() + # force the frame (from the JIT point of view), so that it can + # be accessed also later + frame_vref() jit.virtual_ref_finish(frame_vref, frame) # ________________________________________________________________ diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -65,6 +65,7 @@ last_exception = None f_backref = jit.vref_None + escaped = False # see mark_as_escaped() debugdata = None pycode = None # code object executed by that frame @@ -151,6 +152,15 @@ assert isinstance(cell, Cell) return cell + def mark_as_escaped(self): + """ + Must be called on frames that are exposed to applevel, e.g. by + sys._getframe(). 
This ensures that the virtualref holding the frame + is properly forced by ec.leave(), and thus the frame will be still + accessible even after the corresponding C stack died. + """ + self.escaped = True + def append_block(self, block): assert block.previous is self.lastblock self.lastblock = block diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -37,6 +37,7 @@ raise OperationError(space.w_ValueError, space.wrap("call stack is not deep enough")) if depth == 0: + f.mark_as_escaped() return space.wrap(f) depth -= 1 f = ec.getnextframe_nohidden(f) diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -475,6 +475,8 @@ def __call__(self): if self._state == 'non-forced': self._state = 'forced' + elif self._state == 'invalid': + raise InvalidVirtualRef return self._x @property @@ -485,7 +487,7 @@ def _finish(self): if self._state == 'non-forced': - self._state = 'forgotten' + self._state = 'invalid' class DirectJitVRef(DirectVRef): def __init__(self, x): From pypy.commits at gmail.com Fri Apr 1 06:05:05 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 01 Apr 2016 03:05:05 -0700 (PDT) Subject: [pypy-commit] pypy default: merge Message-ID: <56fe47d1.a8c0c20a.6e82e.ffff8ab0@mx.google.com> Author: fijal Branch: Changeset: r83487:325494587c00 Date: 2016-04-01 12:04 +0200 http://bitbucket.org/pypy/pypy/changeset/325494587c00/ Log: merge diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -172,7 +172,7 @@ def _promote(space, w_obj): """ Promote the first argument of the function and return it. Promote is by value for ints, floats, strs, unicodes (but not subclasses thereof) and by - reference otherwise. + reference otherwise. (Unicodes not supported right now.) 
This function is experimental!""" from rpython.rlib import jit @@ -181,7 +181,7 @@ elif space.is_w(space.type(w_obj), space.w_float): jit.promote(space.float_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_str): - jit.promote_string(space.unicode_w(w_obj)) + jit.promote_string(space.str_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_unicode): raise OperationError(space.w_TypeError, space.wrap( "promoting unicode unsupported")) diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -79,54 +79,45 @@ else: return _resolve_attr_chain(chain, obj, idx + 1) - -class _simple_attrgetter(object): - def __init__(self, attr): - self._attr = attr +class attrgetter(object): + def __init__(self, attr, *attrs): + if ( + not isinstance(attr, basestring) or + not all(isinstance(a, basestring) for a in attrs) + ): + def _raise_typeerror(obj): + raise TypeError( + "argument must be a string, not %r" % type(attr).__name__ + ) + self._call = _raise_typeerror + elif attrs: + self._multi_attrs = [ + a.split(".") for a in [attr] + list(attrs) + ] + self._call = self._multi_attrgetter + elif "." 
not in attr: + self._simple_attr = attr + self._call = self._simple_attrgetter + else: + self._single_attr = attr.split(".") + self._call = self._single_attrgetter def __call__(self, obj): - return getattr(obj, self._attr) + return self._call(obj) + def _simple_attrgetter(self, obj): + return getattr(obj, self._simple_attr) -class _single_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs + def _single_attrgetter(self, obj): + return _resolve_attr_chain(self._single_attr, obj) - def __call__(self, obj): - return _resolve_attr_chain(self._attrs, obj) - - -class _multi_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs - - def __call__(self, obj): + def _multi_attrgetter(self, obj): return tuple([ _resolve_attr_chain(attrs, obj) - for attrs in self._attrs + for attrs in self._multi_attrs ]) -def attrgetter(attr, *attrs): - if ( - not isinstance(attr, basestring) or - not all(isinstance(a, basestring) for a in attrs) - ): - def _raise_typeerror(obj): - raise TypeError( - "argument must be a string, not %r" % type(attr).__name__ - ) - return _raise_typeerror - if attrs: - return _multi_attrgetter([ - a.split(".") for a in [attr] + list(attrs) - ]) - elif "." 
not in attr: - return _simple_attrgetter(attr) - else: - return _single_attrgetter(attr.split(".")) - - class itemgetter(object): def __init__(self, item, *items): self._single = not bool(items) diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -47,7 +47,13 @@ a.name = "hello" a.child = A() a.child.name = "world" + a.child.foo = "bar" assert attrgetter("child.name")(a) == "world" + assert attrgetter("child.name", "child.foo")(a) == ("world", "bar") + + def test_attrgetter_type(self): + from operator import attrgetter + assert type(attrgetter("child.name")) is attrgetter def test_concat(self): class Seq1: diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -279,7 +279,6 @@ self._bigints = [] self._bigints_dict = {} self._floats = [] - self._floats_dict = {} self._snapshots = [] for i, inparg in enumerate(inputargs): inparg.set_position(i) @@ -305,7 +304,6 @@ self._bigints_dict = {} self._refs_dict = llhelper.new_ref_dict_3() - self._floats_dict = {} debug_start("jit-trace-done") debug_print("trace length: " + str(self._pos)) debug_print(" total snapshots: " + str(self._total_snapshots)) From pypy.commits at gmail.com Fri Apr 1 06:51:18 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 01 Apr 2016 03:51:18 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: do not release gil for two logger functions. they are called in @rgc.no_release_gil Message-ID: <56fe52a6.41e11c0a.d0074.ffffc3f5@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83488:4a28028053f5 Date: 2016-04-01 11:54 +0200 http://bitbucket.org/pypy/pypy/changeset/4a28028053f5/ Log: do not release gil for two logger functions. 
they are called in @rgc.no_release_gil diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -91,7 +91,7 @@ def log_jit_counter(self, struct): if not self.cintf.jitlog_enabled(): - return EMPTY_TRACE_LOG + return le_addr = encode_le_addr(struct.number) # not an address (but a number) but it is a machine word le_count = encode_le_addr(struct.i) diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -63,9 +63,11 @@ [], lltype.Void, compilation_info=eci) jitlog_write_marked = rffi.llexternal("jitlog_write_marked", [rffi.INT, rffi.CCHARP, rffi.INT], - lltype.Void, compilation_info=eci) + lltype.Void, compilation_info=eci, + releasegil=False) jitlog_enabled = rffi.llexternal("jitlog_enabled", [], rffi.INT, - compilation_info=eci) + compilation_info=eci, + releasegil=False) jitlog_teardown = rffi.llexternal("jitlog_teardown", [], lltype.Void, compilation_info=eci) From pypy.commits at gmail.com Fri Apr 1 06:51:20 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 01 Apr 2016 03:51:20 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: another releasegil=False Message-ID: <56fe52a8.838d1c0a.a7662.ffffd075@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83489:0edef2d07438 Date: 2016-04-01 12:42 +0200 http://bitbucket.org/pypy/pypy/changeset/0edef2d07438/ Log: another releasegil=False diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -60,7 +60,8 @@ jitlog_init = rffi.llexternal("jitlog_init", [rffi.INT, rffi.CCHARP], rffi.CCHARP, compilation_info=eci) jitlog_try_init_using_env = rffi.llexternal("jitlog_try_init_using_env", - [], lltype.Void, compilation_info=eci) + [], lltype.Void, compilation_info=eci, + releasegil=False) jitlog_write_marked 
= rffi.llexternal("jitlog_write_marked", [rffi.INT, rffi.CCHARP, rffi.INT], lltype.Void, compilation_info=eci, From pypy.commits at gmail.com Fri Apr 1 06:51:22 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 01 Apr 2016 03:51:22 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: try init from env variable rewritten, there is currently no filtering mechanism. filtering does not make sense anymore Message-ID: <56fe52aa.49f9c20a.c94b4.ffff95b8@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83490:b8dc684ce886 Date: 2016-04-01 12:50 +0200 http://bitbucket.org/pypy/pypy/changeset/b8dc684ce886/ Log: try init from env variable rewritten, there is currently no filtering mechanism. filtering does not make sense anymore diff --git a/rpython/rlib/rvmprof/src/jitlog_main.h b/rpython/rlib/rvmprof/src/jitlog_main.h --- a/rpython/rlib/rvmprof/src/jitlog_main.h +++ b/rpython/rlib/rvmprof/src/jitlog_main.h @@ -21,53 +21,22 @@ char *filename = getenv("JITLOG"); if (filename && filename[0]) { - char *newfilename = NULL, *escape; - char *colon = strchr(filename, ':'); - if (filename[0] == '+') { - filename += 1; - colon = NULL; + // mode is 775 + mode_t mode = S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH; + jitlog_fd = open(filename, O_WRONLY | O_CREAT, mode); + if (jitlog_fd == -1) { + perror("could not open"); + exit(-1); } - if (!colon) { - /* JITLOG=+filename (or just 'filename') --- profiling version */ - //pypy_setup_profiling(); - } else { - /* JITLOG=prefix:filename --- conditional logging */ - int n = colon - filename; - jitlog_prefix = malloc(n + 1); - memcpy(jitlog_prefix, filename, n); - //debug_prefix[n] = '\0'; - filename = colon + 1; - } - escape = strstr(filename, "%d"); - if (escape) { - /* a "%d" in the filename is replaced with the pid */ - newfilename = malloc(strlen(filename) + 32); - if (newfilename != NULL) { - char *p = newfilename; - memcpy(p, filename, escape - filename); - p += escape - filename; - sprintf(p, "%ld", 
(long)getpid()); - strcat(p, escape + 2); - filename = newfilename; - } - } - if (strcmp(filename, "-") != 0) { - // mode is 775 - mode_t mode = S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH; - jitlog_fd = open(filename, O_WRONLY | O_CREAT, mode); - } - - if (escape) { - free(newfilename); /* if not null */ - /* the env var is kept and passed to subprocesses */ - } else { + } else { + jitlog_ready = 0; + return; + } #ifndef _WIN32 - unsetenv("JITLOG"); + unsetenv("JITLOG"); #else - putenv("JITLOG="); + putenv("JITLOG="); #endif - } - } jitlog_ready = 1; } From pypy.commits at gmail.com Fri Apr 1 07:48:28 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 01 Apr 2016 04:48:28 -0700 (PDT) Subject: [pypy-commit] benchmarks default: add the ability to number the counts Message-ID: <56fe600c.c711c30a.fb3be.ffffad87@mx.google.com> Author: fijal Branch: Changeset: r350:b75559ef571b Date: 2016-04-01 13:48 +0200 http://bitbucket.org/pypy/benchmarks/changeset/b75559ef571b/ Log: add the ability to number the counts diff --git a/warmup/pypy-graph-alloc-removal.py b/warmup/pypy-graph-alloc-removal.py --- a/warmup/pypy-graph-alloc-removal.py +++ b/warmup/pypy-graph-alloc-removal.py @@ -43,4 +43,8 @@ remover.remove_mallocs_once(g) return time.time() - start -main(graph, 100) +if len(sys.argv) > 2: + count = int(sys.argv[1]) +else: + count = 100 +main(graph, count) From pypy.commits at gmail.com Fri Apr 1 07:49:20 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 01 Apr 2016 04:49:20 -0700 (PDT) Subject: [pypy-commit] benchmarks default: improve Message-ID: <56fe6040.8216c20a.6ce2f.ffffb004@mx.google.com> Author: fijal Branch: Changeset: r351:59290b59a24e Date: 2016-04-01 13:49 +0200 http://bitbucket.org/pypy/benchmarks/changeset/59290b59a24e/ Log: improve diff --git a/warmup/pypy-graph-alloc-removal.py b/warmup/pypy-graph-alloc-removal.py --- a/warmup/pypy-graph-alloc-removal.py +++ b/warmup/pypy-graph-alloc-removal.py @@ -43,7 +43,7 @@ remover.remove_mallocs_once(g) return 
time.time() - start -if len(sys.argv) > 2: +if len(sys.argv) >= 2: count = int(sys.argv[1]) else: count = 100 From pypy.commits at gmail.com Fri Apr 1 10:15:00 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Apr 2016 07:15:00 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: x86-32 support Message-ID: <56fe8264.8d571c0a.b4d9.21e7@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83491:fd234418525e Date: 2016-04-01 16:14 +0200 http://bitbucket.org/pypy/pypy/changeset/fd234418525e/ Log: x86-32 support diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -4,7 +4,7 @@ from rpython.jit.backend.llsupport import symbolic, jitframe, rewrite, gcreftracer from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, - DEBUG_COUNTER, debug_bridge) + DEBUG_COUNTER) from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.metainterp.history import (Const, VOID, ConstInt) @@ -523,10 +523,10 @@ looptoken.number, loopname, r_uint(rawstart + looppos), r_uint(rawstart + size_excluding_failure_stuff), - r_uint(rawstart))) - debug_print(" gc table: 0x%x" % r_uint(rawstart)) + r_uint(rawstart + functionpos))) + debug_print(" gc table: 0x%x" % r_uint(self.gc_table_addr)) debug_print(" function: 0x%x" % r_uint(rawstart + functionpos)) - debug_print(" loop: 0x%x" % r_uint(rawstart + looppos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + looppos)) debug_print(" failures: 0x%x" % r_uint(rawstart + size_excluding_failure_stuff)) debug_print(" end: 0x%x" % r_uint(rawstart + full_size)) @@ -591,7 +591,16 @@ self.patch_gcref_table(original_loop_token, rawstart) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) - debug_bridge(descr_number, rawstart, codeendpos) + 
debug_start("jit-backend-addr") + debug_print("bridge out of Guard 0x%x has address 0x%x to 0x%x" % + (r_uint(descr_number), r_uint(rawstart + startpos), + r_uint(rawstart + codeendpos))) + debug_print(" gc table: 0x%x" % r_uint(self.gc_table_addr)) + debug_print(" jump target: 0x%x" % r_uint(rawstart + startpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + bridgestartpos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + codeendpos)) + debug_print(" end: 0x%x" % r_uint(rawstart + fullsize)) + debug_stop("jit-backend-addr") self.patch_pending_failure_recoveries(rawstart) # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart + startpos) @@ -667,18 +676,32 @@ self.patch_jump_for_descr(faildescr, rawstart) def reserve_gcref_table(self, allgcrefs): - assert IS_X86_64, "XXX" gcref_table_size = len(allgcrefs) * WORD - # align to a multiple of 16 - gcref_table_size = (gcref_table_size + 15) & ~15 - mc = self.mc - assert mc.get_relative_pos() == 0 - for i in range(gcref_table_size): - mc.writechar('\x00') + if IS_X86_64: + # align to a multiple of 16 and reserve space at the beginning + # of the machine code for the gc table. This lets us write + # machine code with relative addressing (%rip - constant). + gcref_table_size = (gcref_table_size + 15) & ~15 + mc = self.mc + assert mc.get_relative_pos() == 0 + for i in range(gcref_table_size): + mc.writechar('\x00') + elif IS_X86_32: + # allocate the gc table right now. This lets us write + # machine code with absolute 32-bit addressing. 
+ self.gc_table_addr = self.datablockwrapper.malloc_aligned( + gcref_table_size, alignment=WORD) + # self.setup_gcrefs_list(allgcrefs) def patch_gcref_table(self, looptoken, rawstart): - assert IS_X86_64, "XXX" + if IS_X86_64: + # the gc table is at the start of the machine code + self.gc_table_addr = rawstart + elif IS_X86_32: + # the gc table was already allocated by reserve_gcref_table() + rawstart = self.gc_table_addr + # tracer = gcreftracer.make_gcref_tracer(rawstart, self._allgcrefs) gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) gcreftracers.append(tracer) # keepalive @@ -1396,18 +1419,27 @@ def _patch_load_from_gc_table(self, index): # must be called immediately after a "p"-mode instruction - # has been emitted + # has been emitted. 64-bit mode only. + assert IS_X86_64 address_in_buffer = index * WORD # at the start of the buffer p_location = self.mc.get_relative_pos() offset = address_in_buffer - p_location self.mc.overwrite32(p_location-4, offset) + def _addr_from_gc_table(self, index): + # get the address of the gc table entry 'index'. 32-bit mode only. 
+ assert IS_X86_32 + return self.gc_table_addr + index * WORD + def genop_load_from_gc_table(self, op, arglocs, resloc): [loc] = arglocs assert isinstance(loc, ImmedLoc) assert isinstance(resloc, RegLoc) - self.mc.MOV_rp(resloc.value, 0) # %rip-relative - self._patch_load_from_gc_table(loc.value) + if IS_X86_64: + self.mc.MOV_rp(resloc.value, 0) # %rip-relative + self._patch_load_from_gc_table(loc.value) + elif IS_X86_32: + self.mc.MOV_rj(resloc.value, self._addr_from_gc_table(loc.value)) def genop_int_force_ge_zero(self, op, arglocs, resloc): self.mc.TEST(arglocs[0], arglocs[0]) @@ -1905,8 +1937,11 @@ guardtok.faildescr, regalloc) # faildescrindex, target = self.store_info_on_descr(startpos, guardtok) - self.mc.PUSH_p(0) # %rip-relative - self._patch_load_from_gc_table(faildescrindex) + if IS_X86_64: + self.mc.PUSH_p(0) # %rip-relative + self._patch_load_from_gc_table(faildescrindex) + elif IS_X86_32: + self.mc.PUSH_j(self._addr_from_gc_table(faildescrindex)) self.push_gcmap(self.mc, guardtok.gcmap, push=True) self.mc.JMP(imm(target)) return startpos @@ -2021,8 +2056,11 @@ descr = op.getdescr() faildescrindex = self.get_gcref_from_faildescr(descr) - self.mc.MOV_rp(eax.value, 0) - self._patch_load_from_gc_table(faildescrindex) + if IS_X86_64: + self.mc.MOV_rp(eax.value, 0) + self._patch_load_from_gc_table(faildescrindex) + elif IS_X86_32: + self.mc.MOV_rj(eax.value, self._addr_from_gc_table(faildescrindex)) self.mov(eax, RawEbpLoc(ofs)) arglist = op.getarglist() @@ -2095,11 +2133,15 @@ faildescr = guard_op.getdescr() ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - assert IS_X86_64, "XXX uses the scratch reg" faildescrindex = self.get_gcref_from_faildescr(faildescr) - self.mc.MOV_rp(X86_64_SCRATCH_REG.value, 0) - self._patch_load_from_gc_table(faildescrindex) - self.mc.MOV(raw_stack(ofs), X86_64_SCRATCH_REG) + if IS_X86_64: + self.mc.MOV_rp(X86_64_SCRATCH_REG.value, 0) + self._patch_load_from_gc_table(faildescrindex) + self.mc.MOV(raw_stack(ofs), 
X86_64_SCRATCH_REG) + elif IS_X86_32: + # XXX need a scratch reg here for efficiency; be more clever + self.mc.PUSH_j(self._addr_from_gc_table(faildescrindex)) + self.mc.POP(raw_stack(ofs)) def _find_nearby_operation(self, delta): regalloc = self._regalloc diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -600,6 +600,7 @@ PUS1_r = insn(rex_nw, register(1), '\x50') PUS1_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) PUS1_m = insn(rex_nw, '\xFF', orbyte(6<<3), mem_reg_plus_const(1)) + PUS1_j = insn(rex_nw, '\xFF', orbyte(6<<3), abs_(1)) PUS1_p = insn(rex_nw, '\xFF', orbyte(6<<3), rip_offset(1)) PUS1_i8 = insn('\x6A', immediate(1, 'b')) PUS1_i32 = insn('\x68', immediate(1, 'i')) @@ -623,6 +624,10 @@ self.PUS1_i32(immed) self.stack_frame_size_delta(+self.WORD) + def PUSH_j(self, abs_addr): + self.PUS1_j(abs_addr) + self.stack_frame_size_delta(+self.WORD) + def PUSH_p(self, rip_offset): self.PUS1_p(rip_offset) self.stack_frame_size_delta(+self.WORD) From pypy.commits at gmail.com Fri Apr 1 10:24:53 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 01 Apr 2016 07:24:53 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: add missing methods, randomly revert some to ObjectStrategy when called, raise on the rest Message-ID: <56fe84b5.49f9c20a.c94b4.ffffec54@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83494:ab7f77f250ba Date: 2016-03-31 21:20 +0300 http://bitbucket.org/pypy/pypy/changeset/ab7f77f250ba/ Log: add missing methods, randomly revert some to ObjectStrategy when called, raise on the rest diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -90,10 +90,10 @@ """ if isinstance(w_obj, listobject.W_ListObject): cpy_strategy = space.fromcache(CPyListStrategy) - if w_obj.strategy is not cpy_strategy: - raise 
OperationError(space.w_TypeError, space.wrap( - 'PySequence_Fast_ITEMS called but object is not the result of PySequence_Fast')) - return w_obj.get_raw_items() # asserts it's a cpyext strategy + if w_obj.strategy is cpy_strategy: + return w_obj.get_raw_items() # asserts it's a cpyext strategy + raise OperationError(space.w_TypeError, space.wrap( + 'PySequence_Fast_ITEMS called but object is not the result of PySequence_Fast')) @cpython_api([PyObject, Py_ssize_t, Py_ssize_t], PyObject) def PySequence_GetSlice(space, w_obj, start, end): @@ -262,13 +262,108 @@ def getslice(self, w_list, start, stop, step, length): #storage = self.unerase(w_list.lstorage) - raise oefmt(w_list.space.w_NotImplementedError, - "settting a slice of a PySequence_Fast is not supported") + raise OperationError(w_list.space.w_NotImplementedError, w_list.space.wrap( + "settting a slice of a PySequence_Fast is not supported")) + + def getitems(self, w_list): + # called when switching list strategy, so convert storage + storage = self.unerase(w_list.lstorage) + retval = [None] * storage._length + for i in range(storage._length): + retval[i] = from_ref(w_list.space, storage._elems[i]) + return retval + + #------------------------------------------ + # all these methods fail or switch strategy and then call ListObjectStrategy's method + + def getitems_fixedsize(self, w_list): + raise NotImplementedError def setslice(self, w_list, start, stop, step, length): #storage = self.unerase(w_list.lstorage) - raise oefmt(w_list.space.w_NotImplementedError, - "settting a slice of a PySequence_Fast is not supported") + raise NotImplementedError + + def get_sizehint(self): + return -1 + + def init_from_list_w(self, w_list, list_w): + raise NotImplementedError + + def clone(self, w_list): + raise NotImplementedError + + def copy_into(self, w_list, w_other): + raise NotImplementedError + + def _resize_hint(self, w_list, hint): + raise NotImplementedError + + def find(self, w_list, w_item, start, stop): + raise 
NotImplementedError + + def getitems_copy(self, w_list): + raise NotImplementedError + + def getitems_bytes(self, w_list): + raise NotImplementedError + + def getitems_unicode(self, w_list): + raise NotImplementedError + + def getitems_int(self, w_list): + raise NotImplementedError + + def getitems_float(self, w_list): + raise NotImplementedError + + def getstorage_copy(self, w_list): + raise NotImplementedError + + def append(self, w_list, w_item): + w_list.switch_to_object_strategy() + w_list.strategy.append(w_list, w_item) + + def mul(self, w_list, times): + raise NotImplementedError + + def inplace_mul(self, w_list, times): + raise NotImplementedError + + def deleteslice(self, w_list, start, step, slicelength): + raise NotImplementedError + + def pop(self, w_list, index): + w_list.switch_to_object_strategy() + w_list.strategy.pop(w_list, index) + + def pop_end(self, w_list): + w_list.switch_to_object_strategy() + w_list.strategy.pop_end(w_list) + + def insert(self, w_list, index, w_item): + w_list.switch_to_object_strategy() + w_list.strategy.insert(w_list, index, w_item) + + def extend(self, w_list, w_any): + w_list.switch_to_object_strategy() + w_list.strategy.extend(w_list, w_any) + + def _extend_from_list(self, w_list, w_other): + raise NotImplementedError + + def _extend_from_iterable(self, w_list, w_iterable): + raise NotImplementedError + + def reverse(self, w_list): + w_list.switch_to_object_strategy() + w_list.strategy.reverse(w_list) + + def sort(self, w_list, reverse): + w_list.switch_to_object_strategy() + w_list.descr_sort(w_list.space, reverse=reverse) + + def is_empty_strategy(self): + return False PyObjectList = lltype.Ptr(lltype.Array(PyObject, hints={'nolength': True})) From pypy.commits at gmail.com Fri Apr 1 10:24:50 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 01 Apr 2016 07:24:50 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: test some list methods Message-ID: <56fe84b2.8673c20a.b2316.ffffec10@mx.google.com> Author: mattip 
Branch: cpyext-ext Changeset: r83492:4ef5f2c2ab24 Date: 2016-03-31 19:28 +0300 http://bitbucket.org/pypy/pypy/changeset/4ef5f2c2ab24/ Log: test some list methods diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -164,6 +164,19 @@ space.setitem(w_l, space.wrap(1), space.wrap(13)) assert space.int_w(space.getitem(w_l, space.wrap(1))) == 13 + def test_manipulations(self, space, api): + w = space.wrap + w_l = w([1, 2, 3, 4]) + + api.PySequence_Fast(w_l, "foo") # converts + space.call_method(w_l, 'insert', w(0), w(0)) + assert space.int_w(space.len(w_l)) == 5 + assert space.int_w(space.getitem(w_l, w(3))) == 3 + + api.PySequence_Fast(w_l, "foo") # converts + space.call_method(w_l, 'sort') + assert space.int_w(space.len(w_l)) == 5 + assert space.int_w(space.getitem(w_l, w(0))) == 0 class XAppTestSequenceObject(AppTestCpythonExtensionBase): def test_sequenceobject(self): From pypy.commits at gmail.com Fri Apr 1 10:24:57 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 01 Apr 2016 07:24:57 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: fix strategy for zip() Message-ID: <56fe84b9.a2f2c20a.d765d.fffff45f@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83496:bcaba584080e Date: 2016-04-01 16:48 +0300 http://bitbucket.org/pypy/pypy/changeset/bcaba584080e/ Log: fix strategy for zip() diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -321,16 +321,16 @@ raise NotImplementedError def getitems_bytes(self, w_list): - raise NotImplementedError + return None def getitems_unicode(self, w_list): - raise NotImplementedError + return None def getitems_int(self, w_list): - raise NotImplementedError + return None def getitems_float(self, w_list): - raise NotImplementedError + return None def getstorage_copy(self, w_list): 
raise NotImplementedError From pypy.commits at gmail.com Fri Apr 1 10:24:52 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 01 Apr 2016 07:24:52 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: refactor error checking to make sure the strategy is correct Message-ID: <56fe84b4.4816c20a.5f157.ffffe8bd@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83493:eab8aff36a9f Date: 2016-03-31 19:29 +0300 http://bitbucket.org/pypy/pypy/changeset/eab8aff36a9f/ Log: refactor error checking to make sure the strategy is correct diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -59,11 +59,11 @@ PySequence_Fast(), o is not NULL, and that i is within bounds. """ if isinstance(w_obj, listobject.W_ListObject): - w_res = w_obj.getitem(index) - else: - assert isinstance(w_obj, tupleobject.W_TupleObject) - w_res = w_obj.wrappeditems[index] - return w_res # borrowed ref + return w_obj.getitem(index) + elif isinstance(w_obj, tupleobject.W_TupleObject): + return w_obj.wrappeditems[index] + raise OperationError(space.w_TypeError, space.wrap( + 'PySequence_Fast_GET_ITEM called but object is not a list or sequence')) @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def PySequence_Fast_GET_SIZE(space, w_obj): @@ -74,8 +74,10 @@ or tuple.""" if isinstance(w_obj, listobject.W_ListObject): return w_obj.length() - assert isinstance(w_obj, tupleobject.W_TupleObject) - return len(w_obj.wrappeditems) + elif isinstance(w_obj, tupleobject.W_TupleObject): + return len(w_obj.wrappeditems) + raise OperationError(space.w_TypeError, space.wrap( + 'PySequence_Fast_GET_SIZE called but object is not a list or sequence')) @cpython_api([PyObject], PyObjectP) def PySequence_Fast_ITEMS(space, w_obj): @@ -86,8 +88,12 @@ So, only use the underlying array pointer in contexts where the sequence cannot change. 
""" - assert isinstance(w_obj, listobject.W_ListObject) - return w_obj.get_raw_items() # asserts it's a cpyext strategy + if isinstance(w_obj, listobject.W_ListObject): + cpy_strategy = space.fromcache(CPyListStrategy) + if w_obj.strategy is not cpy_strategy: + raise OperationError(space.w_TypeError, space.wrap( + 'PySequence_Fast_ITEMS called but object is not the result of PySequence_Fast')) + return w_obj.get_raw_items() # asserts it's a cpyext strategy @cpython_api([PyObject, Py_ssize_t, Py_ssize_t], PyObject) def PySequence_GetSlice(space, w_obj, start, end): From pypy.commits at gmail.com Fri Apr 1 10:24:55 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 01 Apr 2016 07:24:55 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: add missing methods, test Message-ID: <56fe84b7.0308c20a.e73c.3c24@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83495:8f34e1689105 Date: 2016-04-01 10:01 +0300 http://bitbucket.org/pypy/pypy/changeset/8f34e1689105/ Log: add missing methods, test diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -1,12 +1,13 @@ -from rpython.rlib import rerased +from rpython.rlib import rerased, jit from pypy.interpreter.error import OperationError, oefmt -from pypy.objspace.std.listobject import ListStrategy +from pypy.objspace.std.listobject import ( + ListStrategy, UNROLL_CUTOFF, W_ListObject, ObjectListStrategy) from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, Py_ssize_t, PyObject, PyObjectP) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.objspace.std import listobject, tupleobject +from pypy.objspace.std import tupleobject from pypy.module.cpyext.tupleobject import PyTuple_Check, PyTuple_SetItem from pypy.module.cpyext.object import Py_IncRef, Py_DecRef @@ -44,12 +45,12 @@ which case o is returned. 
Use PySequence_Fast_GET_ITEM() to access the members of the result. Returns NULL on failure. If the object is not a sequence, raises TypeError with m as the message text.""" - if isinstance(w_obj, listobject.W_ListObject): + if isinstance(w_obj, W_ListObject): # make sure we can return a borrowed obj from PySequence_Fast_GET_ITEM w_obj.convert_to_cpy_strategy(space) return w_obj try: - return listobject.W_ListObject.newlist_cpyext(space, space.listview(w_obj)) + return W_ListObject.newlist_cpyext(space, space.listview(w_obj)) except OperationError: raise OperationError(space.w_TypeError, space.wrap(rffi.charp2str(m))) @@ -58,7 +59,7 @@ """Return the ith element of o, assuming that o was returned by PySequence_Fast(), o is not NULL, and that i is within bounds. """ - if isinstance(w_obj, listobject.W_ListObject): + if isinstance(w_obj, W_ListObject): return w_obj.getitem(index) elif isinstance(w_obj, tupleobject.W_TupleObject): return w_obj.wrappeditems[index] @@ -72,7 +73,7 @@ gotten by calling PySequence_Size() on o, but PySequence_Fast_GET_SIZE() is faster because it can assume o is a list or tuple.""" - if isinstance(w_obj, listobject.W_ListObject): + if isinstance(w_obj, W_ListObject): return w_obj.length() elif isinstance(w_obj, tupleobject.W_TupleObject): return len(w_obj.wrappeditems) @@ -88,7 +89,7 @@ So, only use the underlying array pointer in contexts where the sequence cannot change. 
""" - if isinstance(w_obj, listobject.W_ListObject): + if isinstance(w_obj, W_ListObject): cpy_strategy = space.fromcache(CPyListStrategy) if w_obj.strategy is cpy_strategy: return w_obj.get_raw_items() # asserts it's a cpyext strategy @@ -276,9 +277,20 @@ #------------------------------------------ # all these methods fail or switch strategy and then call ListObjectStrategy's method + @jit.unroll_safe + def getitems_unroll(self, w_list): + storage = self.unerase(w_list.lstorage) + retval = [None] * storage._length + for i in range(storage._length): + retval[i] = from_ref(w_list.space, storage._elems[i]) + return retval + + @jit.look_inside_iff(lambda self, w_list: + jit.loop_unrolling_heuristic(w_list, w_list.length(), + UNROLL_CUTOFF)) def getitems_fixedsize(self, w_list): - raise NotImplementedError - + return self.getitems_unroll(w_list) + def setslice(self, w_list, start, stop, step, length): #storage = self.unerase(w_list.lstorage) raise NotImplementedError @@ -290,8 +302,12 @@ raise NotImplementedError def clone(self, w_list): - raise NotImplementedError - + storage = w_list.lstorage # lstorage is tuple, no need to clone + w_clone = W_ListObject.from_storage_and_strategy(self.space, storage, + self) + w_clone.switch_to_object_strategy() + return w_clone + def copy_into(self, w_list, w_other): raise NotImplementedError diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -178,6 +178,19 @@ assert space.int_w(space.len(w_l)) == 5 assert space.int_w(space.getitem(w_l, w(0))) == 0 + api.PySequence_Fast(w_l, "foo") # converts + w_t = space.wrap(space.fixedview(w_l)) + assert space.int_w(space.len(w_t)) == 5 + assert space.int_w(space.getitem(w_t, w(0))) == 0 + w_l2 = space.wrap(space.listview(w_t)) + assert space.int_w(space.len(w_l2)) == 5 + assert space.int_w(space.getitem(w_l2, w(0))) == 0 + + api.PySequence_Fast(w_l, 
"foo") # converts + w_sum = space.add(w_l, w_l) + assert space.int_w(space.len(w_sum)) == 10 + + class XAppTestSequenceObject(AppTestCpythonExtensionBase): def test_sequenceobject(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Fri Apr 1 10:24:59 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 01 Apr 2016 07:24:59 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: test, implement more strategy methods, remove methods implemented in base class Message-ID: <56fe84bb.e213c20a.53491.ffffe8c4@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83497:c945e9acf8f8 Date: 2016-04-01 17:03 +0300 http://bitbucket.org/pypy/pypy/changeset/c945e9acf8f8/ Log: test, implement more strategy methods, remove methods implemented in base class diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -320,18 +320,6 @@ def getitems_copy(self, w_list): raise NotImplementedError - def getitems_bytes(self, w_list): - return None - - def getitems_unicode(self, w_list): - return None - - def getitems_int(self, w_list): - return None - - def getitems_float(self, w_list): - return None - def getstorage_copy(self, w_list): raise NotImplementedError @@ -339,11 +327,9 @@ w_list.switch_to_object_strategy() w_list.strategy.append(w_list, w_item) - def mul(self, w_list, times): - raise NotImplementedError - def inplace_mul(self, w_list, times): - raise NotImplementedError + w_list.switch_to_object_strategy() + w_list.strategy.inplace_mul(w_list, times) def deleteslice(self, w_list, start, step, slicelength): raise NotImplementedError diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -190,6 +190,14 @@ w_sum = space.add(w_l, w_l) assert space.int_w(space.len(w_sum)) == 10 + api.PySequence_Fast(w_l, "foo") # converts + 
w_prod = space.mul(w_l, space.wrap(2)) + assert space.int_w(space.len(w_prod)) == 10 + + api.PySequence_Fast(w_l, "foo") # converts + w_l.inplace_mul(2) + assert space.int_w(space.len(w_l)) == 10 + class XAppTestSequenceObject(AppTestCpythonExtensionBase): def test_sequenceobject(self): From pypy.commits at gmail.com Fri Apr 1 10:34:08 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Apr 2016 07:34:08 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: Kill this test, supersceded by tests in test_rewrite Message-ID: <56fe86e0.d4e01c0a.1bcd4.272a@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83498:9f37768db82d Date: 2016-04-01 16:33 +0200 http://bitbucket.org/pypy/pypy/changeset/9f37768db82d/ Log: Kill this test, supersceded by tests in test_rewrite diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -196,31 +196,6 @@ assert is_valid_int(wbdescr.jit_wb_if_flag_byteofs) assert is_valid_int(wbdescr.jit_wb_if_flag_singlebyte) - def test_record_constptrs(self): - class MyFakeCPU(object): - def cast_adr_to_int(self, adr): - assert adr == "some fake address" - return 43 - class MyFakeGCRefList(object): - def get_address_of_gcref(self, s_gcref1): - assert s_gcref1 == s_gcref - return "some fake address" - S = lltype.GcStruct('S') - s = lltype.malloc(S) - s_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) - v_random_box = InputArgRef() - operations = [ - ResOperation(rop.PTR_EQ, [v_random_box, ConstPtr(s_gcref)]), - ] - gc_ll_descr = self.gc_ll_descr - gc_ll_descr.gcrefs = MyFakeGCRefList() - gcrefs = [] - operations = get_deep_immutable_oplist(operations) - operations2 = gc_ll_descr.rewrite_assembler(MyFakeCPU(), operations, - gcrefs) - assert operations2 == operations - assert gcrefs == [s_gcref] - class TestFrameworkMiniMark(TestFramework): gc = 'minimark' From pypy.commits 
at gmail.com Fri Apr 1 11:20:09 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 01 Apr 2016 08:20:09 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: exposed enable_jitlog in module _vmprof, JITLOG env variable now controls the jitlog output file (bugfix), setup_once is now called while initializing the metainterp_sd Message-ID: <56fe91a9.cb361c0a.590f3.3c15@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83499:f42dc412ebd9 Date: 2016-04-01 17:19 +0200 http://bitbucket.org/pypy/pypy/changeset/f42dc412ebd9/ Log: exposed enable_jitlog in module _vmprof, JITLOG env variable now controls the jitlog output file (bugfix), setup_once is now called while initializing the metainterp_sd diff --git a/pypy/module/_vmprof/__init__.py b/pypy/module/_vmprof/__init__.py --- a/pypy/module/_vmprof/__init__.py +++ b/pypy/module/_vmprof/__init__.py @@ -10,6 +10,7 @@ interpleveldefs = { 'enable': 'interp_vmprof.enable', + 'enable_jitlog': 'interp_vmprof.enable_jitlog', 'disable': 'interp_vmprof.disable', 'write_all_code_objects': 'interp_vmprof.write_all_code_objects', 'VMProfError': 'space.fromcache(interp_vmprof.Cache).w_VMProfError', diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -69,6 +69,14 @@ except rvmprof.VMProfError, e: raise VMProfError(space, e) + at unwrap_spec(fileno=int) +def enable_jitlog(space, fileno): + """ Enable PyPy's logging facility. 
""" + try: + rvmprof.enable_jitlog(fileno) + except rvmprof.VMProfError, e: + raise VMProfError(space, e) + def write_all_code_objects(space): """ Needed on cpython, just empty function here """ diff --git a/rpython/jit/backend/x86/test/test_jitlog.py b/rpython/jit/backend/x86/test/test_jitlog.py --- a/rpython/jit/backend/x86/test/test_jitlog.py +++ b/rpython/jit/backend/x86/test/test_jitlog.py @@ -1,4 +1,5 @@ import re +import os from rpython.rlib import debug from rpython.jit.tool.oparser import pure_parse from rpython.jit.metainterp import logger @@ -15,13 +16,34 @@ class TestLogger(Jit386Mixin): - def test_log_loop(self): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) + def test_explicit_enable(self): vmprof = rvmprof.VMProf() fileno, name = tempfile.mkstemp() + self.run_sample_loop(lambda: vmprof.enable_jitlog(fileno)) + assert os.path.exists(name) + with open(name, 'rb') as fd: + # check the file header + assert fd.read(3) == '\x23\xfe\xaf' + assert len(fd.read()) > 0 + print(name) + + def test_venv(self): + fileno, name = tempfile.mkstemp() + os.environ["JITLOG"] = name + self.run_sample_loop(None) + assert os.path.exists(name) + with open(name, 'rb') as fd: + # check the file header + assert fd.read(3) == '\x23\xfe\xaf' + assert len(fd.read()) > 0 + print(name) + + def run_sample_loop(self, func): + myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) def f(x, y): res = 0 - vmprof.enable(fileno, 0.1) + if func: + func() while y > 0: myjitdriver.can_enter_jit(x=x, y=y, res=res) myjitdriver.jit_merge_point(x=x, y=y, res=res) @@ -34,4 +56,3 @@ return res res = self.meta_interp(f, [6, 20]) self.check_trace_count(2) - print(name) diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -29,6 +29,8 @@ MARK_JIT_BRIDGE_COUNTER = 0x21 MARK_JIT_ENTRY_COUNTER = 0x22 +MARK_JITLOG_HEADER = 0x23 + IS_32_BIT = sys.maxint == 2**31-1 
@always_inline @@ -69,10 +71,16 @@ self.is_setup = False def setup_once(self): + if self.is_setup: + return self.is_setup = True self.cintf.jitlog_try_init_using_env() if not self.cintf.jitlog_enabled(): return + + header = encode_le_16bit(0xaffe) + self._write_marked(MARK_JITLOG_HEADER, header) + count = len(resoperations.opname) mark = MARK_RESOP_META for opnum, opname in resoperations.opname.items(): @@ -81,12 +89,11 @@ def teardown(self): self.cintf.jitlog_teardown() + self.is_setup = False def _write_marked(self, mark, line): if not we_are_translated(): assert self.cintf.jitlog_enabled() - if not self.is_setup: - self.setup_once() self.cintf.jitlog_write_marked(mark, line, len(line)) def log_jit_counter(self, struct): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1819,6 +1819,7 @@ def _setup_once(self): """Runtime setup needed by the various components of the JIT.""" if not self.globaldata.initialized: + self.jitlog.setup_once() debug_print(self.jit_starting_line) self.cpu.setup_once() if not self.profiler.initialized: diff --git a/rpython/rlib/rvmprof/__init__.py b/rpython/rlib/rvmprof/__init__.py --- a/rpython/rlib/rvmprof/__init__.py +++ b/rpython/rlib/rvmprof/__init__.py @@ -35,5 +35,8 @@ def enable(fileno, interval): _get_vmprof().enable(fileno, interval) +def enable_jitlog(fileno): + _get_vmprof().enable_jitlog(fileno) + def disable(): _get_vmprof().disable() diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -57,11 +57,10 @@ _nowrapper=True) # jit log functions - jitlog_init = rffi.llexternal("jitlog_init", [rffi.INT, rffi.CCHARP], + jitlog_init = rffi.llexternal("jitlog_init", [rffi.INT], rffi.CCHARP, compilation_info=eci) jitlog_try_init_using_env = rffi.llexternal("jitlog_try_init_using_env", - [], lltype.Void, compilation_info=eci, - 
releasegil=False) + [], lltype.Void, compilation_info=eci) jitlog_write_marked = rffi.llexternal("jitlog_write_marked", [rffi.INT, rffi.CCHARP, rffi.INT], lltype.Void, compilation_info=eci, diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -109,17 +109,15 @@ if p_error: raise VMProfError(rffi.charp2str(p_error)) - self.enable_jitlog(fileno, "") - self._gather_all_code_objs() res = self.cintf.vmprof_enable() if res < 0: raise VMProfError(os.strerror(rposix.get_saved_errno())) self.is_enabled = True - def enable_jitlog(self, fileno, regexp): + def enable_jitlog(self, fileno): # initialize the jit log - p_error = self.cintf.jitlog_init(fileno, regexp) + p_error = self.cintf.jitlog_init(fileno) if p_error: raise VMProfError(rffi.charp2str(p_error)) diff --git a/rpython/rlib/rvmprof/src/jitlog_main.h b/rpython/rlib/rvmprof/src/jitlog_main.h --- a/rpython/rlib/rvmprof/src/jitlog_main.h +++ b/rpython/rlib/rvmprof/src/jitlog_main.h @@ -5,7 +5,6 @@ #include static int jitlog_fd = -1; -static char * jitlog_prefix = NULL; static int jitlog_ready = 0; RPY_EXTERN @@ -25,7 +24,8 @@ mode_t mode = S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH; jitlog_fd = open(filename, O_WRONLY | O_CREAT, mode); if (jitlog_fd == -1) { - perror("could not open"); + dprintf(2, "could not open '%s': ", filename); + perror(NULL); exit(-1); } } else { @@ -41,10 +41,9 @@ } RPY_EXTERN -char *jitlog_init(int fd, const char * prefix) +char *jitlog_init(int fd) { jitlog_fd = fd; - jitlog_prefix = strdup(prefix); jitlog_ready = 1; return NULL; } @@ -59,10 +58,6 @@ // close the jitlog file descriptor close(jitlog_fd); jitlog_fd = -1; - // free the prefix - if (jitlog_prefix != NULL) { - free(jitlog_prefix); - } } RPY_EXTERN diff --git a/rpython/rlib/rvmprof/src/rvmprof.h b/rpython/rlib/rvmprof/src/rvmprof.h --- a/rpython/rlib/rvmprof/src/rvmprof.h +++ b/rpython/rlib/rvmprof/src/rvmprof.h @@ -9,7 +9,7 @@ 
RPY_EXTERN long vmprof_stack_pop(void*); RPY_EXTERN void vmprof_stack_free(void*); -RPY_EXTERN char * jitlog_init(int, const char*); +RPY_EXTERN char * jitlog_init(int); RPY_EXTERN void jitlog_try_init_using_env(void); RPY_EXTERN int jitlog_enabled(); RPY_EXTERN void jitlog_write_marked(int, char*, int); From pypy.commits at gmail.com Fri Apr 1 12:17:43 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Apr 2016 09:17:43 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: Boehm support: the custom tracer won't work there, and the memory may Message-ID: <56fe9f27.838d1c0a.a7662.52c4@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83500:aa7ba84109cb Date: 2016-04-01 18:17 +0200 http://bitbucket.org/pypy/pypy/changeset/aa7ba84109cb/ Log: Boehm support: the custom tracer won't work there, and the memory may not be scanned by Boehm itself diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -345,6 +345,13 @@ arraydescr.itemsize, arraydescr.lendescr.offset) + def make_gcref_tracer(self, array_base_addr, gcrefs): + from rpython.jit.backend.llsupport import gcreftracer + return gcreftracer.make_boehm_tracer(array_base_addr, gcrefs) + + def clear_gcref_tracer(self, tracer): + pass # nothing needed + # ____________________________________________________________ # All code below is for the hybrid or minimark GC @@ -755,6 +762,13 @@ p = rffi.cast(rffi.CCHARP, p) return (ord(p[0]) & IS_OBJECT_FLAG) != 0 + def make_gcref_tracer(self, array_base_addr, gcrefs): + from rpython.jit.backend.llsupport import gcreftracer + return gcreftracer.make_framework_tracer(array_base_addr, gcrefs) + + def clear_gcref_tracer(self, tracer): + tracer.array_length = 0 + # ____________________________________________________________ def get_ll_description(gcdescr, translator=None, rtyper=None): diff --git 
a/rpython/jit/backend/llsupport/gcreftracer.py b/rpython/jit/backend/llsupport/gcreftracer.py --- a/rpython/jit/backend/llsupport/gcreftracer.py +++ b/rpython/jit/backend/llsupport/gcreftracer.py @@ -21,7 +21,7 @@ i += 1 lambda_gcrefs_trace = lambda: gcrefs_trace -def make_gcref_tracer(array_base_addr, gcrefs): +def make_framework_tracer(array_base_addr, gcrefs): # careful about the order here: the allocation of the GCREFTRACER # can trigger a GC. So we must write the gcrefs into the raw # array only afterwards... @@ -39,3 +39,11 @@ llop.gc_writebarrier(lltype.Void, tr) # --no GC until here-- return tr + +def make_boehm_tracer(array_base_addr, gcrefs): + # copy the addresses, but return 'gcrefs' as the object that must be + # kept alive + for i in range(len(gcrefs)): + p = rffi.cast(rffi.SIGNEDP, array_base_addr + i * WORD) + p[0] = rffi.cast(lltype.Signed, gcrefs[i]) + return gcrefs diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -251,7 +251,7 @@ if tracers is not None: compiled_loop_token.asmmemmgr_gcreftracers = None for tracer in tracers: - tracer.array_length = 0 + self.gc_ll_descr.clear_gcref_tracer(tracer) # then free all blocks of code and raw data blocks = compiled_loop_token.asmmemmgr_blocks if blocks is not None: diff --git a/rpython/jit/backend/llsupport/test/test_gcreftracer.py b/rpython/jit/backend/llsupport/test/test_gcreftracer.py --- a/rpython/jit/backend/llsupport/test/test_gcreftracer.py +++ b/rpython/jit/backend/llsupport/test/test_gcreftracer.py @@ -1,6 +1,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.jit.backend.llsupport.gcreftracer import GCREFTRACER, gcrefs_trace -from rpython.jit.backend.llsupport.gcreftracer import make_gcref_tracer +from rpython.jit.backend.llsupport.gcreftracer import make_framework_tracer +from rpython.jit.backend.llsupport.gcreftracer 
import make_boehm_tracer class FakeGC: @@ -29,13 +30,24 @@ assert gc.called[i] == rffi.cast(llmemory.Address, base + i * WORD) lltype.free(a, flavor='raw') -def test_make_gcref_tracer(): +def test_make_framework_tracer(): a = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') base = rffi.cast(lltype.Signed, a) - tr = make_gcref_tracer(base, [123, 456, 789]) + tr = make_framework_tracer(base, [123, 456, 789]) assert a[0] == 123 assert a[1] == 456 assert a[2] == 789 assert tr.array_base_addr == base assert tr.array_length == 3 lltype.free(a, flavor='raw') + +def test_make_boehm_tracer(): + a = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') + base = rffi.cast(lltype.Signed, a) + lst = [123, 456, 789] + tr = make_boehm_tracer(base, lst) + assert a[0] == 123 + assert a[1] == 456 + assert a[2] == 789 + assert tr is lst + lltype.free(a, flavor='raw') diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2,7 +2,7 @@ import os import py -from rpython.jit.backend.llsupport import symbolic, jitframe, rewrite, gcreftracer +from rpython.jit.backend.llsupport import symbolic, jitframe, rewrite from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, DEBUG_COUNTER) from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper @@ -702,7 +702,8 @@ # the gc table was already allocated by reserve_gcref_table() rawstart = self.gc_table_addr # - tracer = gcreftracer.make_gcref_tracer(rawstart, self._allgcrefs) + tracer = self.cpu.gc_ll_descr.make_gcref_tracer(rawstart, + self._allgcrefs) gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) gcreftracers.append(tracer) # keepalive self.teardown_gcrefs_list() From pypy.commits at gmail.com Fri Apr 1 13:23:11 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Apr 2016 10:23:11 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: tweak 
Message-ID: <56feae7f.a151c20a.f09b0.34c4@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83501:8245b29790fa Date: 2016-04-01 19:13 +0200 http://bitbucket.org/pypy/pypy/changeset/8245b29790fa/ Log: tweak diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1433,14 +1433,13 @@ return self.gc_table_addr + index * WORD def genop_load_from_gc_table(self, op, arglocs, resloc): - [loc] = arglocs - assert isinstance(loc, ImmedLoc) + index = op.getarg(0).getint() assert isinstance(resloc, RegLoc) if IS_X86_64: self.mc.MOV_rp(resloc.value, 0) # %rip-relative - self._patch_load_from_gc_table(loc.value) + self._patch_load_from_gc_table(index) elif IS_X86_32: - self.mc.MOV_rj(resloc.value, self._addr_from_gc_table(loc.value)) + self.mc.MOV_rj(resloc.value, self._addr_from_gc_table(index)) def genop_int_force_ge_zero(self, op, arglocs, resloc): self.mc.TEST(arglocs[0], arglocs[0]) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1135,7 +1135,10 @@ consider_same_as_i = _consider_same_as consider_same_as_r = _consider_same_as consider_same_as_f = _consider_same_as - consider_load_from_gc_table = _consider_same_as + + def consider_load_from_gc_table(self, op): + resloc = self.rm.force_allocate_reg(op) + self.perform(op, [], resloc) def consider_int_force_ge_zero(self, op): argloc = self.make_sure_var_in_reg(op.getarg(0)) From pypy.commits at gmail.com Fri Apr 1 13:23:13 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Apr 2016 10:23:13 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: (untested) ARM support Message-ID: <56feae81.4412c30a.d6cca.2fba@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83502:646421afc5c5 Date: 2016-04-01 19:14 +0200 
http://bitbucket.org/pypy/pypy/changeset/646421afc5c5/ Log: (untested) ARM support diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -14,7 +14,7 @@ CoreRegisterManager, check_imm_arg, VFPRegisterManager, operations as regalloc_operations) from rpython.jit.backend.llsupport import jitframe, rewrite -from rpython.jit.backend.llsupport.assembler import DEBUG_COUNTER, debug_bridge, BaseAssembler +from rpython.jit.backend.llsupport.assembler import DEBUG_COUNTER, BaseAssembler from rpython.jit.backend.llsupport.regalloc import get_scale, valid_addressing_size from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from rpython.jit.backend.model import CompiledLoopToken @@ -481,8 +481,9 @@ def generate_quick_failure(self, guardtok): startpos = self.mc.currpos() - fail_descr, target = self.store_info_on_descr(startpos, guardtok) - self.regalloc_push(imm(fail_descr)) + faildescrindex, target = self.store_info_on_descr(startpos, guardtok) + self.load_from_gc_table(r.ip.value, faildescrindex) + self.regalloc_push(r.ip) self.push_gcmap(self.mc, gcmap=guardtok.gcmap, push=True) self.mc.BL(target) return startpos @@ -596,20 +597,22 @@ frame_info = self.datablockwrapper.malloc_aligned( jitframe.JITFRAMEINFO_SIZE, alignment=WORD) clt.frame_info = rffi.cast(jitframe.JITFRAMEINFOPTR, frame_info) - clt.allgcrefs = [] clt.frame_info.clear() # for now if log: operations = self._inject_debugging_code(looptoken, operations, 'e', looptoken.number) + regalloc = Regalloc(assembler=self) + allgcrefs = [] + operations = regalloc.prepare_loop(inputargs, operations, looptoken, + allgcrefs) + self.reserve_gcref_table(allgcrefs) + functionpos = self.mc.get_relative_pos() + self._call_header_with_stack_check() self._check_frame_depth_debug(self.mc) - regalloc = Regalloc(assembler=self) - operations = regalloc.prepare_loop(inputargs, operations, 
looptoken, - clt.allgcrefs) - loop_head = self.mc.get_relative_pos() looptoken._ll_loop_code = loop_head # @@ -620,9 +623,11 @@ self.write_pending_failure_recoveries() + full_size = self.mc.get_relative_pos() rawstart = self.materialize_loop(looptoken) - looptoken._function_addr = looptoken._ll_function_addr = rawstart + looptoken._ll_function_addr = rawstart + functionpos + self.patch_gcref_table(looptoken, rawstart) self.process_pending_guards(rawstart) self.fixup_target_tokens(rawstart) @@ -641,7 +646,13 @@ looptoken.number, loopname, r_uint(rawstart + loop_head), r_uint(rawstart + size_excluding_failure_stuff), - r_uint(rawstart))) + r_uint(rawstart + functionpos))) + debug_print(" gc table: 0x%x" % r_uint(rawstart)) + debug_print(" function: 0x%x" % r_uint(rawstart + functionpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + loop_head)) + debug_print(" failures: 0x%x" % r_uint(rawstart + + size_excluding_failure_stuff)) + debug_print(" end: 0x%x" % r_uint(rawstart + full_size)) debug_stop("jit-backend-addr") return AsmInfo(ops_offset, rawstart + loop_head, @@ -678,27 +689,43 @@ arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = Regalloc(assembler=self) - startpos = self.mc.get_relative_pos() + allgcrefs = [] operations = regalloc.prepare_bridge(inputargs, arglocs, operations, - self.current_clt.allgcrefs, + allgcrefs, self.current_clt.frame_info) + self.reserve_gcref_table(allgcrefs) + startpos = self.mc.get_relative_pos() self._check_frame_depth(self.mc, regalloc.get_gcmap()) + bridgestartpos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() self.write_pending_failure_recoveries() + fullsize = self.mc.get_relative_pos() rawstart = self.materialize_loop(original_loop_token) + self.patch_gcref_table(original_loop_token, rawstart) self.process_pending_guards(rawstart) + debug_start("jit-backend-addr") + debug_print("bridge out of Guard 
0x%x has address 0x%x to 0x%x" % + (r_uint(descr_number), r_uint(rawstart + startpos), + r_uint(rawstart + codeendpos))) + debug_print(" gc table: 0x%x" % r_uint(rawstart)) + debug_print(" jump target: 0x%x" % r_uint(rawstart + startpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + bridgestartpos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + codeendpos)) + debug_print(" end: 0x%x" % r_uint(rawstart + fullsize)) + debug_stop("jit-backend-addr") + # patch the jump from original guard self.patch_trace(faildescr, original_loop_token, - rawstart, regalloc) + rawstart + startpos, regalloc) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) @@ -716,9 +743,53 @@ ops_offset=ops_offset) self.teardown() - debug_bridge(descr_number, rawstart, codeendpos) + return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) - return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) + def reserve_gcref_table(self, allgcrefs): + gcref_table_size = len(allgcrefs) * WORD + # align to a multiple of 16 and reserve space at the beginning + # of the machine code for the gc table. This lets us write + # machine code with relative addressing (see load_from_gc_table()) + gcref_table_size = (gcref_table_size + 15) & ~15 + mc = self.mc + assert mc.get_relative_pos() == 0 + for i in range(gcref_table_size): + mc.writechar('\x00') + self.setup_gcrefs_list(allgcrefs) + + def patch_gcref_table(self, looptoken, rawstart): + # the gc table is at the start of the machine code. 
Fill it now + tracer = self.cpu.gc_ll_descr.make_gcref_tracer(rawstart, + self._allgcrefs) + gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) + gcreftracers.append(tracer) # keepalive + self.teardown_gcrefs_list() + + def load_from_gc_table(self, regnum, index): + """emits either: + LDR Rt, [PC, #offset] if -4095 <= offset + or: + gen_load_int(Rt, offset) + LDR Rt, [PC, Rt] for larger offsets + """ + mc = self.mc + address_in_buffer = index * WORD # at the start of the buffer + offset = address_in_buffer - (mc.get_relative_pos() + 8) # negative + if offset >= -4095: + mc.LDR_ri(regnum, r.pc.value, offset) + else: + # The offset we're loading is negative: right now, + # gen_load_int() will always use exactly + # get_max_size_of_gen_load_int() instructions. No point + # in optimizing in case we get less. Just in case though, + # we check and pad with nops. + extra_bytes = mc.get_max_size_of_gen_load_int() * 2 + offset -= extra_bytes + start = mc.get_relative_pos() + mc.gen_load_int(regnum, offset) + while mc.get_relative_pos() != start + extra_bytes: + mc.NOP() + mc.LDR_rr(regnum, r.pc.value, regnum) def new_stack_loc(self, i, tp): base_ofs = self.cpu.get_baseofs_of_frame_field() @@ -929,6 +1000,12 @@ clt.asmmemmgr_blocks = [] return clt.asmmemmgr_blocks + def get_asmmemmgr_gcreftracers(self, looptoken): + clt = looptoken.compiled_loop_token + if clt.asmmemmgr_gcreftracers is None: + clt.asmmemmgr_gcreftracers = [] + return clt.asmmemmgr_gcreftracers + def _walk_operations(self, inputargs, operations, regalloc): fcond = c.AL self._regalloc = regalloc diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -35,9 +35,9 @@ class ArmGuardToken(GuardToken): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - offset, guard_opnum, frame_depth, fcond=c.AL): + offset, guard_opnum, frame_depth, faildescrindex, fcond=c.AL): 
GuardToken.__init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - guard_opnum, frame_depth) + guard_opnum, frame_depth, faildescrindex) self.fcond = fcond self.offset = offset @@ -178,6 +178,7 @@ assert isinstance(descr, AbstractFailDescr) gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + faildescrindex = self.get_gcref_from_faildescr(descr) token = ArmGuardToken(self.cpu, gcmap, descr, failargs=op.getfailargs(), @@ -185,6 +186,7 @@ offset=offset, guard_opnum=op.getopnum(), frame_depth=frame_depth, + faildescrindex=faildescrindex, fcond=fcond) return token @@ -398,14 +400,13 @@ def emit_op_finish(self, op, arglocs, regalloc, fcond): base_ofs = self.cpu.get_baseofs_of_frame_field() - if len(arglocs) == 2: - [return_val, fail_descr_loc] = arglocs + if len(arglocs) > 0: + [return_val] = arglocs self.store_reg(self.mc, return_val, r.fp, base_ofs) - else: - [fail_descr_loc] = arglocs ofs = self.cpu.get_ofs_of_frame_field('jf_descr') - self.mc.gen_load_int(r.ip.value, fail_descr_loc.value) + faildescrindex = self.get_gcref_from_faildescr(op.getdescr()) + self.load_from_gc_table(r.ip.value, faildescrindex) # XXX self.mov(fail_descr_loc, RawStackLoc(ofs)) self.store_reg(self.mc, r.ip, r.fp, ofs, helper=r.lr) if op.numargs() > 0 and op.getarg(0).type == REF: @@ -1035,9 +1036,9 @@ assert (guard_op.getopnum() == rop.GUARD_NOT_FORCED or guard_op.getopnum() == rop.GUARD_NOT_FORCED_2) faildescr = guard_op.getdescr() + faildescrindex = self.get_gcref_from_faildescr(faildescr) ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - value = rffi.cast(lltype.Signed, cast_instance_to_gcref(faildescr)) - self.mc.gen_load_int(r.ip.value, value) + self.load_from_gc_table(r.ip.value, faildescrindex) self.store_reg(self.mc, r.ip, r.fp, ofs) def _find_nearby_operation(self, delta): @@ -1250,3 +1251,9 @@ self._load_from_mem(res_loc, res_loc, ofs_loc, imm(scale), signed, fcond) return fcond + + def emit_op_load_from_gc_table(self, op, arglocs, regalloc, fcond): + 
res_loc, = arglocs + index = op.getarg(0).getint() + self.load_from_gc_table(res_loc.value, index) + return fcond diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1,5 +1,4 @@ from rpython.rtyper.annlowlevel import cast_instance_to_gcref -from rpython.rlib import rgc from rpython.rlib.debug import debug_print, debug_start, debug_stop from rpython.jit.backend.llsupport.regalloc import FrameManager, \ RegisterManager, TempVar, compute_vars_longevity, BaseRegalloc, \ @@ -627,16 +626,11 @@ def prepare_op_finish(self, op, fcond): # the frame is in fp, but we have to point where in the frame is # the potential argument to FINISH - descr = op.getdescr() - fail_descr = cast_instance_to_gcref(descr) - # we know it does not move, but well - rgc._make_sure_does_not_move(fail_descr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) - locs = [loc, imm(fail_descr)] + locs = [loc] else: - locs = [imm(fail_descr)] + locs = [] return locs def load_condition_into_cc(self, box): @@ -892,6 +886,10 @@ prepare_op_same_as_r = _prepare_op_same_as prepare_op_same_as_f = _prepare_op_same_as + def prepare_op_load_from_gc_table(self, op, fcond): + resloc = self.force_allocate_reg(op) + return [resloc] + def prepare_op_call_malloc_nursery(self, op, fcond): size_box = op.getarg(0) assert isinstance(size_box, ConstInt) From pypy.commits at gmail.com Fri Apr 1 15:36:42 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 01 Apr 2016 12:36:42 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: hg merge rposix-for-3 Message-ID: <56fecdca.d7b81c0a.6028b.2ac5@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83503:1ca73c09051e Date: 2016-03-31 21:08 +0100 http://bitbucket.org/pypy/pypy/changeset/1ca73c09051e/ Log: hg merge rposix-for-3 diff --git a/rpython/rlib/rposix.py 
b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -603,14 +603,44 @@ config = rffi_platform.configure(CConfig) DIRENT = config['DIRENT'] DIRENTP = lltype.Ptr(DIRENT) - c_opendir = external('opendir', [rffi.CCHARP], DIRP, - save_err=rffi.RFFI_SAVE_ERRNO) + c_opendir = external('opendir', + [rffi.CCHARP], DIRP, save_err=rffi.RFFI_SAVE_ERRNO) + c_fdopendir = external('fdopendir', + [rffi.INT], DIRP, save_err=rffi.RFFI_SAVE_ERRNO) # XXX macro=True is hack to make sure we get the correct kind of # dirent struct (which depends on defines) c_readdir = external('readdir', [DIRP], DIRENTP, macro=True, save_err=rffi.RFFI_FULL_ERRNO_ZERO) c_closedir = external('closedir', [DIRP], rffi.INT) +def _listdir(dirp): + result = [] + while True: + direntp = c_readdir(dirp) + if not direntp: + error = get_saved_errno() + break + namep = rffi.cast(rffi.CCHARP, direntp.c_d_name) + name = rffi.charp2str(namep) + if name != '.' and name != '..': + result.append(name) + c_closedir(dirp) + if error: + raise OSError(error, "readdir failed") + return result + +def fdlistdir(dirfd): + """ + Like listdir(), except that the directory is specified as an open + file descriptor. + + Note: fdlistdir() closes the file descriptor. + """ + dirp = c_fdopendir(dirfd) + if not dirp: + raise OSError(get_saved_errno(), "opendir failed") + return _listdir(dirp) + @replace_os_function('listdir') @specialize.argtype(0) def listdir(path): @@ -619,20 +649,7 @@ dirp = c_opendir(path) if not dirp: raise OSError(get_saved_errno(), "opendir failed") - result = [] - while True: - direntp = c_readdir(dirp) - if not direntp: - error = get_saved_errno() - break - namep = rffi.cast(rffi.CCHARP, direntp.c_d_name) - name = rffi.charp2str(namep) - if name != '.' 
and name != '..': - result.append(name) - c_closedir(dirp) - if error: - raise OSError(error, "readdir failed") - return result + return _listdir(dirp) else: # _WIN32 case traits = _preferred_traits(path) win32traits = make_win32_traits(traits) @@ -1801,6 +1818,26 @@ error = c_fchownat(dir_fd, path, owner, group, flag) handle_posix_error('fchownat', error) +if HAVE_FEXECVE: + c_fexecve = external('fexecve', + [rffi.INT, rffi.CCHARPP, rffi.CCHARPP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def fexecve(fd, args, env): + envstrs = [] + for item in env.iteritems(): + envstr = "%s=%s" % item + envstrs.append(envstr) + + # This list conversion already takes care of NUL bytes. + l_args = rffi.ll_liststr2charpp(args) + l_env = rffi.ll_liststr2charpp(envstrs) + c_fexecve(fd, l_args, l_env) + + rffi.free_charpp(l_env) + rffi.free_charpp(l_args) + raise OSError(get_saved_errno(), "execve failed") + if HAVE_LINKAT: c_linkat = external('linkat', [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT) diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -11,6 +11,8 @@ return py.test.mark.skipif(not hasattr(rposix, funcname), reason="Requires rposix.%s()" % funcname) +win_only = py.test.mark.skipif("os.name != 'nt'") + class TestPosixFunction: def test_access(self): filename = str(udir.join('test_access.txt')) @@ -33,9 +35,8 @@ for value in times: assert isinstance(value, float) + @py.test.mark.skipif("not hasattr(os, 'getlogin')") def test_getlogin(self): - if not hasattr(os, 'getlogin'): - py.test.skip('posix specific function') try: expected = os.getlogin() except OSError, e: @@ -43,9 +44,8 @@ data = rposix.getlogin() assert data == expected + @win_only def test_utimes(self): - if os.name != 'nt': - py.test.skip('Windows specific feature') # Windows support centiseconds def f(fname, t1): os.utime(fname, (t1, t1)) @@ -55,15 +55,12 @@ t1 = 1159195039.25 
compile(f, (str, float))(str(fname), t1) assert t1 == os.stat(str(fname)).st_mtime - if sys.version_info < (2, 7): - py.test.skip('requires Python 2.7') t1 = 5000000000.0 compile(f, (str, float))(str(fname), t1) assert t1 == os.stat(str(fname)).st_mtime + @win_only def test__getfullpathname(self): - if os.name != 'nt': - py.test.skip('nt specific function') posix = __import__(os.name) sysdrv = os.getenv('SystemDrive', 'C:') stuff = sysdrv + 'stuff' @@ -134,10 +131,8 @@ os.unlink(filename) + @py.test.mark.skipif("os.name != 'posix'") def test_execve(self): - if os.name != 'posix': - py.test.skip('posix specific function') - EXECVE_ENV = {"foo": "bar", "baz": "quux"} def run_execve(program, args=None, env=None, do_path_lookup=False): @@ -276,11 +271,8 @@ assert rposix.isatty(-1) is False + at py.test.mark.skipif("not hasattr(os, 'ttyname')") class TestOsExpect(ExpectTest): - def setup_class(cls): - if not hasattr(os, 'ttyname'): - py.test.skip("no ttyname") - def test_ttyname(self): def f(): import os @@ -444,9 +436,8 @@ except Exception: pass + @win_only def test_is_valid_fd(self): - if os.name != 'nt': - py.test.skip('relevant for windows only') assert rposix.is_valid_fd(0) == 1 fid = open(str(udir.join('validate_test.txt')), 'w') fd = fid.fileno() @@ -537,6 +528,14 @@ os.open(u'/tmp/t', 0, 0) compile(f, ()) + +def test_fdlistdir(tmpdir): + tmpdir.join('file').write('text') + dirfd = os.open(str(tmpdir), os.O_RDONLY) + result = rposix.fdlistdir(dirfd) + # Note: fdlistdir() always closes dirfd + assert result == ['file'] + def test_symlinkat(tmpdir): tmpdir.join('file').write('text') dirfd = os.open(str(tmpdir), os.O_RDONLY) @@ -546,7 +545,6 @@ finally: os.close(dirfd) - def test_renameat(tmpdir): tmpdir.join('file').write('text') dirfd = os.open(str(tmpdir), os.O_RDONLY) From pypy.commits at gmail.com Fri Apr 1 15:36:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 01 Apr 2016 12:36:44 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Update 
execve() docstring Message-ID: <56fecdcc.918e1c0a.54fa1.29e4@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83504:befb634cf35b Date: 2016-03-31 21:20 +0100 http://bitbucket.org/pypy/pypy/changeset/befb634cf35b/ Log: Update execve() docstring diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1142,7 +1142,8 @@ def _exit(space, status): os._exit(status) -def execv(space, w_command, w_args): +def execv(space, w_path, w_args): + """ execv(path, args) Execute an executable path with arguments, replacing current process. @@ -1150,7 +1151,7 @@ path: path of executable file args: iterable of strings """ - execve(space, w_command, w_args, None) + execve(space, w_path, w_args, None) def _env2interp(space, w_env): env = {} @@ -1160,16 +1161,20 @@ env[space.fsencode_w(w_key)] = space.fsencode_w(w_value) return env -def execve(space, w_command, w_args, w_env): - """ execve(path, args, env) +def execve(space, w_path, w_args, w_env): + """execve(path, args, env) Execute a path with arguments and environment, replacing current process. - path: path of executable file - args: iterable of arguments - env: dictionary of strings mapping to strings + path: path of executable file + args: tuple or list of arguments + env: dictionary of strings mapping to strings + +On some platforms, you may specify an open file descriptor for path; + execve will execute the program the file descriptor is open to. + If this functionality is unavailable, using it raises NotImplementedError. 
""" - command = space.fsencode_w(w_command) + command = space.fsencode_w(w_path) try: args_w = space.unpackiterable(w_args) if len(args_w) < 1: From pypy.commits at gmail.com Fri Apr 1 15:36:46 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 01 Apr 2016 12:36:46 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Separate implementations of execv() and execve() since their signature and validation logic differ Message-ID: <56fecdce.90051c0a.85c37.27dd@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83505:9dc07d8a2654 Date: 2016-04-01 20:35 +0100 http://bitbucket.org/pypy/pypy/changeset/9dc07d8a2654/ Log: Separate implementations of execv() and execve() since their signature and validation logic differ diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1151,7 +1151,23 @@ path: path of executable file args: iterable of strings """ - execve(space, w_path, w_args, None) + command = space.fsencode_w(w_path) + try: + args_w = space.unpackiterable(w_args) + if len(args_w) < 1: + raise oefmt(space.w_ValueError, + "execv() arg 2 must not be empty") + args = [space.fsencode_w(w_arg) for w_arg in args_w] + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise oefmt(space.w_TypeError, + "execv() arg 2 must be an iterable of strings") + try: + os.execv(command, args) + except OSError as e: + raise wrap_oserror(space, e) + def _env2interp(space, w_env): env = {} @@ -1161,7 +1177,8 @@ env[space.fsencode_w(w_key)] = space.fsencode_w(w_value) return env -def execve(space, w_path, w_args, w_env): + +def execve(space, w_path, w_argv, w_environment): """execve(path, args, env) Execute a path with arguments and environment, replacing current process. @@ -1175,29 +1192,16 @@ If this functionality is unavailable, using it raises NotImplementedError. 
""" command = space.fsencode_w(w_path) + if not (space.isinstance_w(w_argv, space.w_list) + or space.isinstance_w(w_argv, space.w_tuple)): + raise oefmt(space.w_TypeError, + "execve: argv must be a tuple or a list") + args = [space.fsencode_w(w_arg) for w_arg in space.unpackiterable(w_argv)] + env = _env2interp(space, w_environment) try: - args_w = space.unpackiterable(w_args) - if len(args_w) < 1: - w_msg = space.wrap("execv() must have at least one argument") - raise OperationError(space.w_ValueError, w_msg) - args = [space.fsencode_w(w_arg) for w_arg in args_w] - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - msg = "execv() arg 2 must be an iterable of strings" - raise OperationError(space.w_TypeError, space.wrap(str(msg))) - # - if w_env is None: # when called via execv() above - try: - os.execv(command, args) - except OSError, e: - raise wrap_oserror(space, e) - else: - env = _env2interp(space, w_env) - try: - os.execve(command, args, env) - except OSError, e: - raise wrap_oserror(space, e) + os.execve(command, args, env) + except OSError, e: + raise wrap_oserror(space, e) @unwrap_spec(mode=int, path='fsencode') def spawnv(space, mode, path, w_args): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -415,7 +415,6 @@ def test_execv_no_args(self): os = self.posix raises(ValueError, os.execv, "notepad", []) - raises(ValueError, os.execve, "notepad", [], {}) def test_execv_raising2(self): os = self.posix From pypy.commits at gmail.com Fri Apr 1 15:56:42 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 01 Apr 2016 12:56:42 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Allow path as file descriptor in execve() Message-ID: <56fed27a.4816c20a.5f157.6098@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83506:a64857baf5ce Date: 2016-04-01 20:55 +0100 
http://bitbucket.org/pypy/pypy/changeset/a64857baf5ce/ Log: Allow path as file descriptor in execve() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1191,7 +1191,6 @@ execve will execute the program the file descriptor is open to. If this functionality is unavailable, using it raises NotImplementedError. """ - command = space.fsencode_w(w_path) if not (space.isinstance_w(w_argv, space.w_list) or space.isinstance_w(w_argv, space.w_tuple)): raise oefmt(space.w_TypeError, @@ -1199,9 +1198,25 @@ args = [space.fsencode_w(w_arg) for w_arg in space.unpackiterable(w_argv)] env = _env2interp(space, w_environment) try: - os.execve(command, args, env) - except OSError, e: - raise wrap_oserror(space, e) + path = space.fsencode_w(w_path) + except OperationError: + if not rposix.HAVE_FEXECVE: + raise oefmt(space.w_TypeError, + "execve: illegal type for path argument") + if not space.isinstance_w(w_path, space.w_int): + raise oefmt(space.w_TypeError, + "argument should be string, bytes or integer, not %T", w_path) + # File descriptor case + fd = unwrap_fd(space, w_path) + try: + rposix.fexecve(fd, args, env) + except OSError as e: + raise wrap_oserror(space, e) + else: + try: + os.execve(path, args, env) + except OSError as e: + raise wrap_oserror(space, e) @unwrap_spec(mode=int, path='fsencode') def spawnv(space, mode, path, w_args): From pypy.commits at gmail.com Fri Apr 1 18:09:55 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Apr 2016 15:09:55 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: fix(?) Message-ID: <56fef1b3.03321c0a.45a15.508e@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83507:af7d68248897 Date: 2016-04-02 00:09 +0200 http://bitbucket.org/pypy/pypy/changeset/af7d68248897/ Log: fix(?) 
diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -557,7 +557,7 @@ debug_stop('jit-backend-ops') def _call_header(self): - assert self.mc.currpos() == 0 + # there is the gc table before this point self.gen_func_prolog() def _call_header_with_stack_check(self): From pypy.commits at gmail.com Sat Apr 2 02:58:26 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 01 Apr 2016 23:58:26 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: finish all CPyListStrategy methods Message-ID: <56ff6d92.d3301c0a.cc2f0.4a3d@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83508:29f300d480a2 Date: 2016-04-02 01:57 +0300 http://bitbucket.org/pypy/pypy/changeset/29f300d480a2/ Log: finish all CPyListStrategy methods diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -274,9 +274,6 @@ retval[i] = from_ref(w_list.space, storage._elems[i]) return retval - #------------------------------------------ - # all these methods fail or switch strategy and then call ListObjectStrategy's method - @jit.unroll_safe def getitems_unroll(self, w_list): storage = self.unerase(w_list.lstorage) @@ -291,9 +288,12 @@ def getitems_fixedsize(self, w_list): return self.getitems_unroll(w_list) + #------------------------------------------ + # all these methods fail or switch strategy and then call ListObjectStrategy's method + def setslice(self, w_list, start, stop, step, length): - #storage = self.unerase(w_list.lstorage) - raise NotImplementedError + w_list.switch_to_object_strategy() + w_list.strategy.setslice(w_list, start, stop, step, length) def get_sizehint(self): return -1 @@ -309,16 +309,19 @@ return w_clone def copy_into(self, w_list, w_other): - raise NotImplementedError + w_list.switch_to_object_strategy() + w_list.strategy.copy_into(w_list, w_other) def 
_resize_hint(self, w_list, hint): - raise NotImplementedError + pass def find(self, w_list, w_item, start, stop): - raise NotImplementedError + w_list.switch_to_object_strategy() + return w_list.strategy.find(w_list, w_item, start, stop) def getitems_copy(self, w_list): - raise NotImplementedError + w_list.switch_to_object_strategy() + return w_list.strategy.getitems_copy(w_list) def getstorage_copy(self, w_list): raise NotImplementedError @@ -332,7 +335,8 @@ w_list.strategy.inplace_mul(w_list, times) def deleteslice(self, w_list, start, step, slicelength): - raise NotImplementedError + w_list.switch_to_object_strategy() + w_list.strategy.deleteslice(w_list, start, step, slicelength) def pop(self, w_list, index): w_list.switch_to_object_strategy() @@ -351,10 +355,12 @@ w_list.strategy.extend(w_list, w_any) def _extend_from_list(self, w_list, w_other): - raise NotImplementedError + w_list.switch_to_object_strategy() + w_list.strategy._extend_from_list(w_list, w_other) def _extend_from_iterable(self, w_list, w_iterable): - raise NotImplementedError + w_list.switch_to_object_strategy() + w_list.strategy._extend_from_iterable(w_list, w_iterable) def reverse(self, w_list): w_list.switch_to_object_strategy() From pypy.commits at gmail.com Sat Apr 2 04:27:24 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Apr 2016 01:27:24 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: Test fixes, including moving the two tests about pinned objects to Message-ID: <56ff826c.0173c20a.ea565.ffffa0d9@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83509:3b8c459d92f7 Date: 2016-04-02 10:27 +0200 http://bitbucket.org/pypy/pypy/changeset/3b8c459d92f7/ Log: Test fixes, including moving the two tests about pinned objects to test_rewrite.py: we don't need any special case about pinned objects any more. Just check that it still gives mostly the same results in terms of load_from_gc_table. 
diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -247,6 +247,14 @@ """ return jitframe.JITFRAME.allocate(frame_info) + def make_gcref_tracer(self, array_base_addr, gcrefs): + # for tests, or for Boehm. Overridden for framework GCs + from rpython.jit.backend.llsupport import gcreftracer + return gcreftracer.make_boehm_tracer(array_base_addr, gcrefs) + + def clear_gcref_tracer(self, tracer): + pass # nothing needed unless overridden + class JitFrameDescrs: def _freeze_(self): return True @@ -345,13 +353,6 @@ arraydescr.itemsize, arraydescr.lendescr.offset) - def make_gcref_tracer(self, array_base_addr, gcrefs): - from rpython.jit.backend.llsupport import gcreftracer - return gcreftracer.make_boehm_tracer(array_base_addr, gcrefs) - - def clear_gcref_tracer(self, tracer): - pass # nothing needed - # ____________________________________________________________ # All code below is for the hybrid or minimark GC diff --git a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py deleted file mode 100644 --- a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py +++ /dev/null @@ -1,149 +0,0 @@ -from test_rewrite import get_size_descr, get_array_descr, get_description, BaseFakeCPU -from rpython.jit.backend.llsupport.descr import get_size_descr,\ - get_field_descr, get_array_descr, ArrayDescr, FieldDescr,\ - SizeDescr, get_interiorfield_descr -from rpython.jit.backend.llsupport.gc import GcLLDescr_boehm,\ - GcLLDescr_framework, MovableObjectTracker -from rpython.jit.backend.llsupport import jitframe, gc -from rpython.jit.metainterp.gc import get_description -from rpython.jit.tool.oparser import parse -from rpython.jit.metainterp.optimizeopt.util import equaloplists -from rpython.jit.metainterp.history import JitCellToken, FLOAT -from rpython.rtyper.lltypesystem 
import lltype, rffi, lltype, llmemory -from rpython.rtyper import rclass -from rpython.jit.backend.x86.arch import WORD -from rpython.rlib import rgc - -class Evaluator(object): - def __init__(self, scope): - self.scope = scope - def __getitem__(self, key): - return eval(key, self.scope) - - -class FakeLoopToken(object): - pass - -# The following class is based on rpython.jit.backend.llsupport.test.test_rewrite.RewriteTests. -# It's modified to be able to test the object pinning specific features. -class RewriteTests(object): - def check_rewrite(self, frm_operations, to_operations, **namespace): - # objects to use inside the test - A = lltype.GcArray(lltype.Signed) - adescr = get_array_descr(self.gc_ll_descr, A) - adescr.tid = 4321 - alendescr = adescr.lendescr - # - pinned_obj_type = lltype.GcStruct('PINNED_STRUCT', ('my_int', lltype.Signed)) - pinned_obj_my_int_descr = get_field_descr(self.gc_ll_descr, pinned_obj_type, 'my_int') - pinned_obj_ptr = lltype.malloc(pinned_obj_type) - pinned_obj_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, pinned_obj_ptr) - assert rgc.pin(pinned_obj_gcref) - # - notpinned_obj_type = lltype.GcStruct('NOT_PINNED_STRUCT', ('my_int', lltype.Signed)) - notpinned_obj_my_int_descr = get_field_descr(self.gc_ll_descr, notpinned_obj_type, 'my_int') - notpinned_obj_ptr = lltype.malloc(notpinned_obj_type) - notpinned_obj_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, notpinned_obj_ptr) - # - ptr_array_descr = self.cpu.arraydescrof(MovableObjectTracker.ptr_array_type) - # - vtable_descr = self.gc_ll_descr.fielddescr_vtable - O = lltype.GcStruct('O', ('parent', rclass.OBJECT), - ('x', lltype.Signed)) - o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) - # - tiddescr = self.gc_ll_descr.fielddescr_tid - wbdescr = self.gc_ll_descr.write_barrier_descr - WORD = globals()['WORD'] - # - strdescr = self.gc_ll_descr.str_descr - unicodedescr = self.gc_ll_descr.unicode_descr - strlendescr = strdescr.lendescr - unicodelendescr = 
unicodedescr.lendescr - - casmdescr = JitCellToken() - clt = FakeLoopToken() - clt._ll_initial_locs = [0, 8] - frame_info = lltype.malloc(jitframe.JITFRAMEINFO, flavor='raw') - clt.frame_info = frame_info - frame_info.jfi_frame_depth = 13 - frame_info.jfi_frame_size = 255 - framedescrs = self.gc_ll_descr.getframedescrs(self.cpu) - framelendescr = framedescrs.arraydescr.lendescr - jfi_frame_depth = framedescrs.jfi_frame_depth - jfi_frame_size = framedescrs.jfi_frame_size - jf_frame_info = framedescrs.jf_frame_info - signedframedescr = self.cpu.signedframedescr - floatframedescr = self.cpu.floatframedescr - casmdescr.compiled_loop_token = clt - tzdescr = None # noone cares - # - namespace.update(locals()) - # - for funcname in self.gc_ll_descr._generated_functions: - namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname) - namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, - '%s_descr' % funcname) - # - ops = parse(frm_operations, namespace=namespace) - operations = self.gc_ll_descr.rewrite_assembler(self.cpu, - ops.operations, - []) - # make the array containing the GCREF's accessible inside the tests. - # This must be done after we call 'rewrite_assembler'. Before that - # call 'last_moving_obj_tracker' is None or filled with some old - # value. 
- namespace['ptr_array_gcref'] = self.gc_ll_descr.last_moving_obj_tracker.ptr_array_gcref - expected = parse(to_operations % Evaluator(namespace), - namespace=namespace) - equaloplists(operations, expected.operations) - lltype.free(frame_info, flavor='raw') - -class TestFramework(RewriteTests): - def setup_method(self, meth): - class config_(object): - class translation(object): - gc = 'minimark' - gcrootfinder = 'asmgcc' - gctransformer = 'framework' - gcremovetypeptr = False - gcdescr = get_description(config_) - self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, - really_not_translated=True) - self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( - lambda cpu: True) - # - class FakeCPU(BaseFakeCPU): - def sizeof(self, STRUCT, is_object): - descr = SizeDescr(104) - descr.tid = 9315 - descr.vtable = 12 - return descr - self.cpu = FakeCPU() - - def test_simple_getfield(self): - self.check_rewrite(""" - [] - i0 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) - """, """ - [] - p1 = gc_load_r(ConstPtr(ptr_array_gcref), %(0 * ptr_array_descr.itemsize + 1)s, %(ptr_array_descr.itemsize)s) - i0 = gc_load_i(p1, 0, -%(pinned_obj_my_int_descr.field_size)s) - """) - assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 1 - - def test_simple_getfield_twice(self): - self.check_rewrite(""" - [] - i0 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) - i1 = getfield_gc_i(ConstPtr(notpinned_obj_gcref), descr=notpinned_obj_my_int_descr) - i2 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) - """, """ - [] - p1 = gc_load_r(ConstPtr(ptr_array_gcref), %(0 * ptr_array_descr.itemsize + 1)s, %(ptr_array_descr.itemsize)s) - i0 = gc_load_i(p1, 0, -%(pinned_obj_my_int_descr.field_size)s) - i1 = gc_load_i(ConstPtr(notpinned_obj_gcref), 0, -%(notpinned_obj_my_int_descr.field_size)s) - p2 = gc_load_r(ConstPtr(ptr_array_gcref), %(1 * ptr_array_descr.itemsize + 1)s, 
%(ptr_array_descr.itemsize)s) - i2 = gc_load_i(p2, 0, -%(pinned_obj_my_int_descr.field_size)s) - """) - assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 2 diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -1368,6 +1368,37 @@ """) assert self.gcrefs == [self.myR1, self.myR1b] + def test_pinned_simple_getfield(self): + # originally in test_pinned_object_rewrite; now should give the + # same result for pinned objects and for normal objects + self.check_rewrite(""" + [] + i0 = getfield_gc_i(ConstPtr(myR1), descr=xdescr) + """, """ + [] + p1 = load_from_gc_table(0) + i0 = gc_load_i(p1, %(xdescr.offset)s, -%(xdescr.field_size)s) + """) + assert self.gcrefs == [self.myR1] + + def test_pinned_simple_getfield_twice(self): + # originally in test_pinned_object_rewrite; now should give the + # same result for pinned objects and for normal objects + self.check_rewrite(""" + [] + i0 = getfield_gc_i(ConstPtr(myR1), descr=xdescr) + i1 = getfield_gc_i(ConstPtr(myR1b), descr=xdescr) + i2 = getfield_gc_i(ConstPtr(myR1), descr=xdescr) + """, """ + [] + p1 = load_from_gc_table(0) + i0 = gc_load_i(p1, %(xdescr.offset)s, -%(xdescr.field_size)s) + p2 = load_from_gc_table(1) + i1 = gc_load_i(p2, %(xdescr.offset)s, -%(xdescr.field_size)s) + i2 = gc_load_i(p1, %(xdescr.offset)s, -%(xdescr.field_size)s) + """) + assert self.gcrefs == [self.myR1, self.myR1b] + def test_guard_in_gcref(self): self.check_rewrite(""" [i1, i2] From pypy.commits at gmail.com Sat Apr 2 10:08:54 2016 From: pypy.commits at gmail.com (antocuni) Date: Sat, 02 Apr 2016 07:08:54 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: start my talk Message-ID: <56ffd276.a151c20a.f09b0.76fc@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5618:a1afb047aee1 Date: 2016-04-02 15:00 +0200 
http://bitbucket.org/pypy/extradoc/changeset/a1afb047aee1/ Log: start my talk diff --git a/talk/bucharest2016/jit-frontend/author.latex b/talk/bucharest2016/jit-frontend/author.latex new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/author.latex @@ -0,0 +1,8 @@ +\definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} + +\title[PyPy Intro]{PyPy Intro and JIT Frontend} +\author[antocuni] +{Antonio Cuni} + +\institute{Intel at Bucharest} +\date{April 4 2016} diff --git a/talk/bucharest2016/jit-frontend/beamerdefs.txt b/talk/bucharest2016/jit-frontend/beamerdefs.txt new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/beamerdefs.txt @@ -0,0 +1,108 @@ +.. colors +.. =========================== + +.. role:: green +.. role:: red + + +.. general useful commands +.. =========================== + +.. |pause| raw:: latex + + \pause + +.. |small| raw:: latex + + {\small + +.. |end_small| raw:: latex + + } + +.. |scriptsize| raw:: latex + + {\scriptsize + +.. |end_scriptsize| raw:: latex + + } + +.. |strike<| raw:: latex + + \sout{ + +.. closed bracket +.. =========================== + +.. |>| raw:: latex + + } + + +.. example block +.. =========================== + +.. |example<| raw:: latex + + \begin{exampleblock}{ + + +.. |end_example| raw:: latex + + \end{exampleblock} + + + +.. alert block +.. =========================== + +.. |alert<| raw:: latex + + \begin{alertblock}{ + + +.. |end_alert| raw:: latex + + \end{alertblock} + + + +.. columns +.. =========================== + +.. |column1| raw:: latex + + \begin{columns} + \begin{column}{0.45\textwidth} + +.. |column2| raw:: latex + + \end{column} + \begin{column}{0.45\textwidth} + + +.. |end_columns| raw:: latex + + \end{column} + \end{columns} + + + +.. |snake| image:: ../../img/py-web-new.png + :scale: 15% + + + +.. nested blocks +.. =========================== + +.. |nested| raw:: latex + + \begin{columns} + \begin{column}{0.85\textwidth} + +.. 
|end_nested| raw:: latex + + \end{column} + \end{columns} diff --git a/talk/bucharest2016/jit-frontend/stylesheet.latex b/talk/bucharest2016/jit-frontend/stylesheet.latex new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/stylesheet.latex @@ -0,0 +1,11 @@ +\usetheme{Boadilla} +\usecolortheme{whale} +\setbeamercovered{transparent} +\setbeamertemplate{navigation symbols}{} + +\definecolor{darkgreen}{rgb}{0, 0.5, 0.0} +\newcommand{\docutilsrolegreen}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\docutilsrolered}[1]{\color{red}#1\normalcolor} + +\newcommand{\green}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\red}[1]{\color{red}#1\normalcolor} diff --git a/talk/bucharest2016/jit-frontend/talk.rst b/talk/bucharest2016/jit-frontend/talk.rst new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/talk.rst @@ -0,0 +1,17 @@ +.. include:: beamerdefs.txt + +================================ +PyPy Intro and JIT Frontend +================================ + +About this talk +---------------- + +* What is PyPy? What is RPython? 
+ +* Tracing JIT 101 + +* PyPy JIT frontend and optimizer + + - "how we manage to make things fast" + From pypy.commits at gmail.com Sat Apr 2 10:09:01 2016 From: pypy.commits at gmail.com (antocuni) Date: Sat, 02 Apr 2016 07:09:01 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: fix makefile Message-ID: <56ffd27d.aaf8c20a.d6370.ffffde92@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5622:8188a609f5bf Date: 2016-04-02 16:04 +0200 http://bitbucket.org/pypy/extradoc/changeset/8188a609f5bf/ Log: fix makefile diff --git a/talk/bucharest2016/jit-frontend/Makefile b/talk/bucharest2016/jit-frontend/Makefile --- a/talk/bucharest2016/jit-frontend/Makefile +++ b/talk/bucharest2016/jit-frontend/Makefile @@ -3,7 +3,7 @@ # http://bitbucket.org/antocuni/env/src/619f486c4fad/bin/inkscapeslide.py -talk.pdf: talk.rst author.latex stylesheet.latex diagrams/tracing-phases-p0.pdf diagrams/trace-p0.pdf diagrams/tracetree-p0.pdf diagrams/architecture-p0.pdf diagrams/pypytrace-p0.pdf +talk.pdf: talk.rst author.latex stylesheet.latex diagrams/tracing-phases-p0.pdf diagrams/architecture-p0.pdf diagrams/pypytrace-p0.pdf rst2beamer.py --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit @@ -19,11 +19,11 @@ diagrams/tracing-phases-p0.pdf: diagrams/tracing-phases.svg cd diagrams && inkscapeslide.py tracing-phases.svg -diagrams/trace-p0.pdf: diagrams/trace.svg - cd diagrams && inkscapeslide.py trace.svg +# diagrams/trace-p0.pdf: diagrams/trace.svg +# cd diagrams && inkscapeslide.py trace.svg -diagrams/tracetree-p0.pdf: diagrams/tracetree.svg - cd diagrams && inkscapeslide.py tracetree.svg +# diagrams/tracetree-p0.pdf: diagrams/tracetree.svg +# cd diagrams && inkscapeslide.py tracetree.svg diagrams/architecture-p0.pdf: diagrams/architecture.svg cd diagrams && inkscapeslide.py architecture.svg From pypy.commits at 
gmail.com Sat Apr 2 10:08:56 2016 From: pypy.commits at gmail.com (antocuni) Date: Sat, 02 Apr 2016 07:08:56 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: add some slides Message-ID: <56ffd278.4c181c0a.e7a94.37ac@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5619:e3308dd709fd Date: 2016-04-02 15:26 +0200 http://bitbucket.org/pypy/extradoc/changeset/e3308dd709fd/ Log: add some slides diff --git a/talk/bucharest2016/jit-frontend/Makefile b/talk/bucharest2016/jit-frontend/Makefile new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/Makefile @@ -0,0 +1,32 @@ +# you can find rst2beamer.py and inkscapeslide.py here: +# http://bitbucket.org/antocuni/env/src/619f486c4fad/bin/rst2beamer.py +# http://bitbucket.org/antocuni/env/src/619f486c4fad/bin/inkscapeslide.py + + +talk.pdf: talk.rst author.latex stylesheet.latex diagrams/tracing-phases-p0.pdf diagrams/trace-p0.pdf diagrams/tracetree-p0.pdf diagrams/architecture-p0.pdf diagrams/pypytrace-p0.pdf + rst2beamer.py --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit + sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit + #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit + pdflatex talk.latex || exit + +view: talk.pdf + evince talk.pdf & + +xpdf: talk.pdf + xpdf talk.pdf & + + +diagrams/tracing-phases-p0.pdf: diagrams/tracing-phases.svg + cd diagrams && inkscapeslide.py tracing-phases.svg + +diagrams/trace-p0.pdf: diagrams/trace.svg + cd diagrams && inkscapeslide.py trace.svg + +diagrams/tracetree-p0.pdf: diagrams/tracetree.svg + cd diagrams && inkscapeslide.py tracetree.svg + +diagrams/architecture-p0.pdf: diagrams/architecture.svg + cd diagrams && inkscapeslide.py architecture.svg + +diagrams/pypytrace-p0.pdf: diagrams/pypytrace.svg + cd diagrams && inkscapeslide.py pypytrace.svg diff --git a/talk/bucharest2016/jit-frontend/diagrams/tracing-phases.svg b/talk/bucharest2016/jit-frontend/diagrams/tracing-phases.svg new file 
mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/diagrams/tracing-phases.svg @@ -0,0 +1,473 @@ + + + +image/svg+xmlinterp+tracing+compilation+running+cold_guard+compiled_loop+bridge+hot_guard + +Interpretation + + + +Tracing + + +hotloopdetected + + +Compilation + + +Running + + +coldguardfailed + + +enteringcompiledloop + + +guardfailure + + + + + +hot + + +hotguardfailed + + + \ No newline at end of file diff --git a/talk/bucharest2016/jit-frontend/talk.rst b/talk/bucharest2016/jit-frontend/talk.rst --- a/talk/bucharest2016/jit-frontend/talk.rst +++ b/talk/bucharest2016/jit-frontend/talk.rst @@ -15,3 +15,135 @@ - "how we manage to make things fast" + +Part 1 +------- + +**PyPy introduction** + +What is PyPy? +-------------- + +* For most people, the final product: + +|scriptsize| + +.. sourcecode:: python + + $ pypy + Python 2.7.10 (173add34cdd2, Mar 15 2016, 23:00:19) + [PyPy 5.1.0-alpha0 with GCC 4.8.4] on linux2 + >>>> import test.pystone + >>>> test.pystone.main() + Pystone(1.1) time for 50000 passes = 0.0473992 + This machine benchmarks at 1.05487e+06 pystones/second + +|end_scriptsize| + +* More in general: a broader project, ecosystem and community + + +PyPy as a project +------------------ + +* ``rpython``: a fancy compiler + + - source code: "statically typed Python with type inference and metaprogramming" + + - fancy features: C-like performance, GC, meta-JIT + + - "like GCC" (it statically produces a binary) + +|pause| + +* ``pypy``: a Python interpreter + + - "like CPython", but written in RPython + + - CPython : GCC = PyPy : RPython + + + +Important fact +--------------- + +* We **did not** write a JIT compiler for Python + +* The "meta JIT" works with all RPython programs + +* The "Python JIT" is automatically generated from the interpreter + +* Writing an interpreter is vastly easier than a compiler + +* Other interpreters: smalltalk, prolog, ruby, php, ... 
+ + +The final product +------------------ + +* ``rpython`` + ``pypy``: the final binary you download and execute + + - a Python interpreter + + - with a GC + + - with a JIT + + - fast + + + +Part 1 +------ + +**Overview of tracing JITs** + + +Assumptions +----------- + +* Pareto Principle (80-20 rule) + + - the 20% of the program accounts for the 80% of the runtime + + - **hot-spots** + +* Fast Path principle + + - optimize only what is necessary + + - fall back for uncommon cases + +|pause| + +* Most of runtime spent in **loops** + +* Always the same code paths (likely) + + +Tracing JIT +----------- + +* Interpret the program as usual + +* Detect **hot** loops + +* Tracing phase + + - **linear** trace + +* Compiling + +* Execute + + - guards to ensure correctness + +* Profit :-) + + +Tracing JIT phases +------------------- + +.. animage:: diagrams/tracing-phases-p*.pdf + :align: center + :scale: 100% From pypy.commits at gmail.com Sat Apr 2 10:08:58 2016 From: pypy.commits at gmail.com (antocuni) Date: Sat, 02 Apr 2016 07:08:58 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: more slides Message-ID: <56ffd27a.a2f2c20a.d765d.776d@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5620:e0d0635116b3 Date: 2016-04-02 15:32 +0200 http://bitbucket.org/pypy/extradoc/changeset/e0d0635116b3/ Log: more slides diff --git a/talk/bucharest2016/jit-frontend/diagrams/architecture.svg b/talk/bucharest2016/jit-frontend/diagrams/architecture.svg new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/diagrams/architecture.svg @@ -0,0 +1,700 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + rpython+codewriter+jitcode+timeline+metatracer+optimizer+backend+jitted + + + + + + def LOAD_GLOBAL(self): ... + + + + def STORE_FAST(self): ... + + + + def BINARY_ADD(self): ... 
+ + + + + RPYTHON + + + + CODEWRITER + + + + + + + + + ...p0 = getfield_gc(p0, 'func_globals')p2 = getfield_gc(p1, 'strval')call(dict_lookup, p0, p2).... + + + + + + ...p0 = getfield_gc(p0, 'locals_w')setarrayitem_gc(p0, i0, p1).... + + + + + ...promote_class(p0)i0 = getfield_gc(p0, 'intval')promote_class(p1)i1 = getfield_gc(p1, 'intval')i2 = int_add(i0, i1)if (overflowed) goto ...p2 = new_with_vtable('W_IntObject')setfield_gc(p2, i2, 'intval').... + + + + + + + + + JITCODE + + + + compile-time + runtime + + + META-TRACER + + + + + OPTIMIZER + + + + + BACKEND + + + + + ASSEMBLER + + + + diff --git a/talk/bucharest2016/jit-frontend/diagrams/pypytrace.svg b/talk/bucharest2016/jit-frontend/diagrams/pypytrace.svg new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/diagrams/pypytrace.svg @@ -0,0 +1,346 @@ + + + + + + + + + + image/svg+xml + + + + + + + python+dis+trace0+trace1+trace2+trace3 + + + def fn(): c = a+b ... + + + LOAD_GLOBAL ALOAD_GLOBAL BBINARY_ADDSTORE_FAST C + + + + ...p0 = getfield_gc(p0, 'func_globals')p2 = getfield_gc(p1, 'strval')call(dict_lookup, p0, p2)... + + + + ...p0 = getfield_gc(p0, 'func_globals')p2 = getfield_gc(p1, 'strval')call(dict_lookup, p0, p2)... + + + ...guard_class(p0, W_IntObject)i0 = getfield_gc(p0, 'intval')guard_class(p1, W_IntObject)i1 = getfield_gc(p1, 'intval')i2 = int_add(00, i1)guard_not_overflow()p2 = new_with_vtable('W_IntObject')setfield_gc(p2, i2, 'intval')... + + + ...p0 = getfield_gc(p0, 'locals_w')setarrayitem_gc(p0, i0, p1).... + + diff --git a/talk/bucharest2016/jit-frontend/talk.rst b/talk/bucharest2016/jit-frontend/talk.rst --- a/talk/bucharest2016/jit-frontend/talk.rst +++ b/talk/bucharest2016/jit-frontend/talk.rst @@ -93,7 +93,7 @@ -Part 1 +Part 2 ------ **Overview of tracing JITs** @@ -147,3 +147,320 @@ .. 
animage:: diagrams/tracing-phases-p*.pdf :align: center :scale: 100% + + +Trace trees +----------- + +WRITE ME + +Part 3 +------ + +**The PyPy JIT** + +General architecture +--------------------- + +.. animage:: diagrams/architecture-p*.pdf + :align: center + :scale: 24% + + +PyPy trace example +------------------- + +.. animage:: diagrams/pypytrace-p*.pdf + :align: center + :scale: 40% + + +PyPy optimizer +--------------- + +- intbounds + +- constant folding / pure operations + +- virtuals + +- string optimizations + +- heap (multiple get/setfield, etc) + +- unroll + + +Intbound optimization (1) +------------------------- + +|example<| |small| intbound.py |end_small| |>| + +.. sourcecode:: python + + def fn(): + i = 0 + while i < 5000: + i += 2 + return i + +|end_example| + +Intbound optimization (2) +-------------------------- + +|scriptsize| +|column1| +|example<| |small| unoptimized |end_small| |>| + +.. sourcecode:: python + + ... + i17 = int_lt(i15, 5000) + guard_true(i17) + i19 = int_add_ovf(i15, 2) + guard_no_overflow() + ... + +|end_example| + +|pause| + +|column2| +|example<| |small| optimized |end_small| |>| + +.. sourcecode:: python + + ... + i17 = int_lt(i15, 5000) + guard_true(i17) + i19 = int_add(i15, 2) + ... + +|end_example| +|end_columns| +|end_scriptsize| + +|pause| + +* It works **often** + +* array bound checking + +* intbound info propagates all over the trace + + +Virtuals (1) +------------- + +|example<| |small| virtuals.py |end_small| |>| + +.. sourcecode:: python + + def fn(): + i = 0 + while i < 5000: + i += 2 + return i + +|end_example| + + +Virtuals (2) +------------ + +|scriptsize| +|column1| +|example<| |small| unoptimized |end_small| |>| + +.. sourcecode:: python + + ... + guard_class(p0, W_IntObject) + i1 = getfield_pure(p0, 'intval') + i2 = int_add(i1, 2) + p3 = new(W_IntObject) + setfield_gc(p3, i2, 'intval') + ... + +|end_example| + +|pause| + +|column2| +|example<| |small| optimized |end_small| |>| + +.. 
sourcecode:: python + + ... + i2 = int_add(i1, 2) + ... + +|end_example| +|end_columns| +|end_scriptsize| + +|pause| + +* The most important optimization (TM) + +* It works both inside the trace and across the loop + +* It works for tons of cases + + - e.g. function frames + + +Constant folding (1) +--------------------- + +|example<| |small| constfold.py |end_small| |>| + +.. sourcecode:: python + + def fn(): + i = 0 + while i < 5000: + i += 2 + return i + +|end_example| + + +Constant folding (2) +-------------------- + +|scriptsize| +|column1| +|example<| |small| unoptimized |end_small| |>| + +.. sourcecode:: python + + ... + i1 = getfield_pure(p0, 'intval') + i2 = getfield_pure(, + 'intval') + i3 = int_add(i1, i2) + ... + +|end_example| + +|pause| + +|column2| +|example<| |small| optimized |end_small| |>| + +.. sourcecode:: python + + ... + i1 = getfield_pure(p0, 'intval') + i3 = int_add(i1, 2) + ... + +|end_example| +|end_columns| +|end_scriptsize| + +|pause| + +* It "finishes the job" + +* Works well together with other optimizations (e.g. virtuals) + +* It also does "normal, boring, static" constant-folding + + +Out of line guards (1) +----------------------- + +|example<| |small| outoflineguards.py |end_small| |>| + +.. sourcecode:: python + + N = 2 + def fn(): + i = 0 + while i < 5000: + i += N + return i + +|end_example| + + +Out of line guards (2) +---------------------- + +|scriptsize| +|column1| +|example<| |small| unoptimized |end_small| |>| + +.. sourcecode:: python + + ... + quasiimmut_field(, 'val') + guard_not_invalidated() + p0 = getfield_gc(, 'val') + ... + i2 = getfield_pure(p0, 'intval') + i3 = int_add(i1, i2) + +|end_example| + +|pause| + +|column2| +|example<| |small| optimized |end_small| |>| + +.. sourcecode:: python + + ... + guard_not_invalidated() + ... + i3 = int_add(i1, 2) + ... 
+ +|end_example| +|end_columns| +|end_scriptsize| + +|pause| + +* Python is too dynamic, but we don't care :-) + +* No overhead in assembler code + +* Used a bit "everywhere" + + +Guards +------- + +- guard_true + +- guard_false + +- guard_class + +- guard_no_overflow + +- **guard_value** + +Promotion +--------- + +- guard_value + +- specialize code + +- make sure not to **overspecialize** + +- example: type of objects + +- example: function code objects, ... + +Conclusion +----------- + +- PyPy is cool :-) + +- Any question? From pypy.commits at gmail.com Sat Apr 2 10:09:00 2016 From: pypy.commits at gmail.com (antocuni) Date: Sat, 02 Apr 2016 07:09:00 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: more slides Message-ID: <56ffd27c.c65b1c0a.3b8a2.3b0e@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5621:5bbdec29c586 Date: 2016-04-02 16:03 +0200 http://bitbucket.org/pypy/extradoc/changeset/5bbdec29c586/ Log: more slides diff --git a/talk/bucharest2016/jit-frontend/talk.rst b/talk/bucharest2016/jit-frontend/talk.rst --- a/talk/bucharest2016/jit-frontend/talk.rst +++ b/talk/bucharest2016/jit-frontend/talk.rst @@ -54,6 +54,9 @@ - "like GCC" (it statically produces a binary) + - you can run RPython programs on top of CPython (veeery slow, for + development only) + |pause| * ``pypy``: a Python interpreter @@ -159,6 +162,77 @@ **The PyPy JIT** + +Terminology (1) +---------------- + +* **translation time**: when you run "rpython targetpypy.py" to get the + ``pypy` binary + +* **runtime**: everything which happens after you start ``pypy`` + +* **interpretation**, **tracing**, **compiling** + +* **assembler/machine code**: the output of the JIT compiler + +* **execution time**: when your Python program is being executed + + - by the interpreter + + - by the machine code + + +Terminology (2) +---------------- + +* **interp-level**: things written in RPython + +* **[PyPy] interpreter**: the RPython program which executes the final Python + programs + 
+* **bytecode**: "the output of dis.dis". It is executed by the PyPy + interpreter. + +* **app-level**: things written in Python, and executed by the PyPy + Interpreter + + +Terminology (3) +--------------- + +* (the following is not 100% accurate but it's enough to understand the + general principle) + +* **low level op or ResOperation** + + - low-level instructions like "add two integers", "read a field out of a + struct", "call this function" + + - (more or less) the same level of C ("portable assembler") + + - knows about GC objects (e.g. you have ``getfield_gc`` vs ``getfield_raw``) + + +* **jitcodes**: low-level representation of RPython functions + + - sequence of low level ops + + - generated at **translation time** + + - 1 RPython function --> 1 C function --> 1 jitcode + + +* **JIT traces or loops** + + - a very specific sequence of llops as actually executed by your Python program + + - generated at **runtime** (more specifically, during **tracing**) + +* **JIT optimizer**: takes JIT traces and emits JIT traces + +* **JIT backend**: takes JIT traces and emits machine code + + General architecture --------------------- From pypy.commits at gmail.com Sat Apr 2 10:09:11 2016 From: pypy.commits at gmail.com (antocuni) Date: Sat, 02 Apr 2016 07:09:11 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: generate talk.pdf Message-ID: <56ffd287.86351c0a.25a9c.38f7@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5624:bd451b255eab Date: 2016-04-02 16:08 +0200 http://bitbucket.org/pypy/extradoc/changeset/bd451b255eab/ Log: generate talk.pdf diff --git a/talk/bucharest2016/jit-frontend/talk.pdf b/talk/bucharest2016/jit-frontend/talk.pdf new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4ad8022637420b08f8a93bc24ddfd00d4b13adb2 GIT binary patch [cut] From pypy.commits at gmail.com Sat Apr 2 10:09:03 2016 From: pypy.commits at gmail.com (antocuni) Date: Sat, 02 Apr 2016 07:09:03 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: 
split into two slides Message-ID: <56ffd27f.cf0b1c0a.5d9b.3c22@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5623:3bd035f4d6ec Date: 2016-04-02 16:07 +0200 http://bitbucket.org/pypy/extradoc/changeset/3bd035f4d6ec/ Log: split into two slides diff --git a/talk/bucharest2016/jit-frontend/talk.rst b/talk/bucharest2016/jit-frontend/talk.rst --- a/talk/bucharest2016/jit-frontend/talk.rst +++ b/talk/bucharest2016/jit-frontend/talk.rst @@ -167,7 +167,7 @@ ---------------- * **translation time**: when you run "rpython targetpypy.py" to get the - ``pypy` binary + ``pypy`` binary * **runtime**: everything which happens after you start ``pypy`` @@ -200,6 +200,7 @@ Terminology (3) --------------- + * (the following is not 100% accurate but it's enough to understand the general principle) @@ -222,6 +223,9 @@ - 1 RPython function --> 1 C function --> 1 jitcode +Terminology (4) +--------------- + * **JIT traces or loops** - a very specific sequence of llops as actually executed by your Python program From pypy.commits at gmail.com Sat Apr 2 13:18:05 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Apr 2016 10:18:05 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: Add the current status of the piratepad Message-ID: <56fffecd.0d3f1c0a.efece.776e@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5625:eb069da8950f Date: 2016-04-02 19:18 +0200 http://bitbucket.org/pypy/extradoc/changeset/eb069da8950f/ Log: Add the current status of the piratepad diff --git a/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt b/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt @@ -0,0 +1,119 @@ +pypy's assembler backend + +input: linear sequence of instructions, called a "trace". + +A trace is a sequence of instructions in SSA form. Most instructions correspond to one or a few CPU-level instructions. There are a few meta-instructions like `label` and debugging stuff. 
All branching is done with guards, which are instructions that check that a condition is true and exit the trace if not. A failing guard can have a new trace added to it later, called a "bridge". A patched guard becomes a direct `Jcond` instruction going to the bridge, with no indirection, no register spilling, etc. + +A trace ends with either a `return` or a `jump to label`. The target label is either inside the same trace, or in some older one. For historical reasons we call a "loop" a trace that is not a bridge. The machine code that we generate is organized as a forest of trees; the trunk of the tree is a "loop", and the branches are all bridges (branching off the trunk or off another branch). + +* every trunk or branch that ends in a `jump to label` can target a label from a different tree, too. + +* the whole process of assembling a loop or a branch is basically single-threaded, so no synchronization issue there (including to patch older generated instructions). + +* the generated assembler has got a "frame" in %rbp, which is actually not on the stack at all, but is a GC object (called a "jitframe"). Spilling goes there. + +* the guards are `Jcond` to a very small piece of generated code, which is basically pushing a couple of constants on the stack and then jumping to the general guard-recovery code. That code will save the registers into the jitframe and then exit the whole generated function. The caller of that generated function checks how it finished: if it finished by hitting a guard, then the caller is responsible for calling the "blackhole interpreter". This is the part of the front-end that recovers from failing guards and finishes running the frame (including, possibly, by jumping again into generated assembler). 
+ + +Details about the JITting process: + +* front-end and optimization pass +* rewrite (includes gc related transformation as well as simplifications) +* assembler generation + + +# Front-end and optimization pass + +Not discussed here in detail. This produces loops and bridges using an instruction set that is "high-level" in some sense: it contains instructions like "new"/"new_array", and "setfield"/"setarrayitem"/"setinteriorfield" which describe the action of storing a value in a precise field of the structure or array. For example, the "setfield" action might require implicitly a GC write barrier. This is the high-level trace that we send to the following step. + + +# Rewrite + +A mostly but not completely CPU-independent phase: lowers some instructions. For example, the variants of "new" are lowered to "malloc" and a few "gc_store": it bumps the pointer of the GC and then sets a few fields explicitly in the newly allocated structure. The "setfield" is replaced with a "cond_gc_wb_call" (conditional call to the write barrier) if needed, followed by a "gc_store". + +The "gc_store" instruction can be encoded in a single MOV assembler instruction, but is not as flexible as a MOV. The address is always specified as "some GC pointer + an offset". We don't have the notion of interior pointer for GC objects. + +A different instruction, "gc_store_indexed", offers additional operands, which can be mapped to a single MOV instruction using forms like `[rax+8*rcx+24]`. + +Some other complex instructions pass through to the backend, which must deal with them: for example, "card marking" in the GC. (Writing an object pointer inside an array would require walking the whole array later to find "young" references. Instead of that, we flip a bit for every range of 128 entries. This is a common GC optimization.)
Setting the card bit of a GC object requires a sequence of assembler instructions that depends too much on the target CPU to be expressed explicitly here (moreover, it contains a few branches, which are hard to express at this level). + + + + +# Assembly + +No fancy code generation technique, but greedy forward pass that tries to avoid some pitfalls + + +## Handling instructions + +* One by one (forward direction). Each instruction asks the register allocator to ensure that some arguments are in registers (not in the jitframe); asks for a register to put its result into; and asks for additional scratch registers that will be freed at the end of the instruction. There is a special case for boolean variables: they are stored in the condition code flags instead of being materialized as a 0/1 value. (They are materialized later, except in the common case where they are only used by the next `guard_false` or `guard_true` and then forgotten.) + +* Instruction arguments are loaded into a register on demand. This makes the backend quite easy to write, but leads to some bad decisions. + + +## Linear scan register allocation + +Although it's always a linear trace that we consider, we don't use advanced techniques for register allocation: we do forward, on-demand allocation as the backend produces the assembler. When it asks for a register to put some value into, we give it any free register, without consideration for what will be done with it later. We compute the longevity of all variables, but only use it when choosing which register to spill (we spill the variable with the longest longevity). + +This works to some extent because it is well integrated with the earlier optimization pass. Loops are unrolled once by the optimization pass to allow more powerful optimizations---the optimization pass itself is the place that benefits the most, but it also has benefits here in the assembly pass. These are: + +* The first peeling initializes the register binding on the first use.
+* This leads to an already allocated register of the trace loop. +* As well as allocated registers when exiting bridges + +[Try to better allocate registers to match the ABI (minor to non benefit in the current state)] + + +## More complex mappings + +Some instructions generate more complex code. These are either or both of: + +* complex instructions generating some local control flow, like "cond_gc_wb_call" (for write barriers), "call_assembler" (a call followed by a few checks). + +* instructions that invoke custom assembler helpers, like the slow-path of write barriers or the slow-path of allocations. These slow-paths are typically generated too, so that we are not constrained by the usual calling conventions. + + +## GC pointers + +Around most CALL instructions, we need to record a description of where the GC pointers are (registers and stack frame). This is needed in case the CALL invokes a garbage collection. The GC pointers can move; the positions in the registers and stack frame are fixed by the GC. That's a reason for why we don't have explicit interior pointers. + +GC pointers can appear as constants in the trace. We are busy changing that to use a constant table and MOV REG, (%RIP+offset). The "constant" table can actually change if the GC objects move. + + +## Vectorization + +Optimization developed to use SIMD instructions for trace loops. Primary idea was to use it as an optimization of micro numpy. It has several passes on the already optimized trace. + +Shortly explained: It builds dependencies for an unrolled trace loop, gathering pairs/packs of operations that could be executed in parallel and finally schedules the operations. 
+ +What did it add to the code base: + +* Dependencies can be constructed +* Code motion of guards to relax dependencies +* Scheduler to reorder trace +* Array bound check removal (especially for unrolled traces) + +What can it do: + +* Transform vector loops (element wise operations) +* Accumulation (reduce([...],operator,0)). Requires Operation to be associative and commutative +* SSE 4.1 as ``vector backend'' + +## We do not + +* Keep tracing data around to reoptimize the trace tree. (Once a trace is compiled, minimal data is kept.) This is one reason (there are others in the front-end) for the following result: JIT-compiling a small loop with two common paths ends up as one "loop" and one bridge assembled, and the bridge-following path is slightly less efficient. This is notably because this bridge is assembled with two constraints: the input registers are fixed (from the guard), and the output registers are fixed (from the jump target); usually these two sets of fixed registers are different, and copying around is needed. + +* We don't join trace tails: we only assemble *trees*. + +* We don't do any reordering (neither of trace instructions nor of individual assembler instructions) + +* We don't do any cross-instruction optimization that makes sense only for the backend and can't easily be expressed at a higher level. I'm sure there are tons of examples of that, but e.g. loading a large constant in a register that will survive for several instructions; moving out of loops *parts* of some instruction like the address calculation; etc. etc. + +* Other optimization opportunities I can think about: look at the function prologue/epilogue; look at the overhead (small but not zero) at the start of a bridge. Also check if the way guards are implemented makes sense. Also, we generate large-ish sequences of assembler instructions with tons of `Jcond` that are almost never followed; any optimization opportunity there? (They all go forward, if it changes anything.) 
In theory we could also replace some of these with a signal handler on segfault (e.g. `guard_nonnull_class`). + + +# a GCC or LLVM backend? + +At least for comparison we'd like a JIT backend that emits its code using GCC or LLVM (irrespective of the time it would take). But it's hard to map reasonably well the guards to the C language or to LLVM IR. The problems are: (1) we have many guards, we would like to avoid having many paths that each do a full saving-all-local-variables-that-are-still-alive; (2) it's hard to patch a guard when a bridge is compiled from it; (3) instructions like a CALL need to expose the local variables that are GC pointers; CALL_MAY_FORCE need to expose *all* local variables for optional off-line reconstruction of the interpreter state. + From pypy.commits at gmail.com Sat Apr 2 16:11:23 2016 From: pypy.commits at gmail.com (sbauman) Date: Sat, 02 Apr 2016 13:11:23 -0700 (PDT) Subject: [pypy-commit] pypy remove-getarrayitem-pure: Start removing getarrayitem_gc_pure ops Message-ID: <5700276b.07b71c0a.38fdd.ffffae7b@mx.google.com> Author: Spenser Bauman Branch: remove-getarrayitem-pure Changeset: r83510:84f6ca586b1c Date: 2016-04-02 14:59 -0400 http://bitbucket.org/pypy/pypy/changeset/84f6ca586b1c/ Log: Start removing getarrayitem_gc_pure ops diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -360,7 +360,7 @@ else: lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT) flag = get_type_flag(ARRAY_INSIDE.OF) - is_pure = bool(ARRAY_INSIDE._immutable_field(None)) + is_pure = ARRAY_INSIDE._immutable_field(None) != False arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag, is_pure) if ARRAY_INSIDE.OF is lltype.SingleFloat or \ ARRAY_INSIDE.OF is lltype.Float: diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ 
b/rpython/jit/metainterp/pyjitpl.py @@ -475,19 +475,50 @@ @arguments("box", "box", "descr") def opimpl_getarrayitem_gc_i(self, arraybox, indexbox, arraydescr): + if (arraydescr.is_always_pure() and + isinstance(arraybox, ConstPtr) and + isinstance(indexbox, ConstInt)): + # if the arguments are directly constants, bypass the heapcache + # completely + val = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETARRAYITEM_GC_I, arraydescr, + arraybox, indexbox) + return executor.wrap_constant(val) return self._do_getarrayitem_gc_any(rop.GETARRAYITEM_GC_I, arraybox, indexbox, arraydescr) @arguments("box", "box", "descr") def opimpl_getarrayitem_gc_r(self, arraybox, indexbox, arraydescr): + if (arraydescr.is_always_pure() and + isinstance(arraybox, ConstPtr) and + isinstance(indexbox, ConstInt)): + # if the arguments are directly constants, bypass the heapcache + # completely + val = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETARRAYITEM_GC_R, arraydescr, + arraybox, indexbox) + return executor.wrap_constant(val) return self._do_getarrayitem_gc_any(rop.GETARRAYITEM_GC_R, arraybox, indexbox, arraydescr) @arguments("box", "box", "descr") def opimpl_getarrayitem_gc_f(self, arraybox, indexbox, arraydescr): + if (arraydescr.is_always_pure() and + isinstance(arraybox, ConstPtr) and + isinstance(indexbox, ConstInt)): + # if the arguments are directly constants, bypass the heapcache + # completely + val = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETARRAYITEM_GC_F, arraydescr, + arraybox, indexbox) + return executor.wrap_constant(val) return self._do_getarrayitem_gc_any(rop.GETARRAYITEM_GC_F, arraybox, indexbox, arraydescr) + opimpl_getarrayitem_gc_i_pure = opimpl_getarrayitem_gc_i + opimpl_getarrayitem_gc_r_pure = opimpl_getarrayitem_gc_r + opimpl_getarrayitem_gc_f_pure = opimpl_getarrayitem_gc_f + @arguments("box", "box", "descr") def opimpl_getarrayitem_raw_i(self, arraybox, indexbox, arraydescr): return 
self.execute_with_descr(rop.GETARRAYITEM_RAW_I, @@ -498,42 +529,6 @@ return self.execute_with_descr(rop.GETARRAYITEM_RAW_F, arraydescr, arraybox, indexbox) - @arguments("box", "box", "descr") - def opimpl_getarrayitem_gc_i_pure(self, arraybox, indexbox, arraydescr): - if isinstance(arraybox, ConstPtr) and isinstance(indexbox, ConstInt): - # if the arguments are directly constants, bypass the heapcache - # completely - val = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETARRAYITEM_GC_PURE_I, arraydescr, - arraybox, indexbox) - return executor.wrap_constant(val) - return self._do_getarrayitem_gc_any(rop.GETARRAYITEM_GC_PURE_I, - arraybox, indexbox, arraydescr) - - @arguments("box", "box", "descr") - def opimpl_getarrayitem_gc_f_pure(self, arraybox, indexbox, arraydescr): - if isinstance(arraybox, ConstPtr) and isinstance(indexbox, ConstInt): - # if the arguments are directly constants, bypass the heapcache - # completely - resval = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETARRAYITEM_GC_PURE_F, arraydescr, - arraybox, indexbox) - return executor.wrap_constant(resval) - return self._do_getarrayitem_gc_any(rop.GETARRAYITEM_GC_PURE_F, - arraybox, indexbox, arraydescr) - - @arguments("box", "box", "descr") - def opimpl_getarrayitem_gc_r_pure(self, arraybox, indexbox, arraydescr): - if isinstance(arraybox, ConstPtr) and isinstance(indexbox, ConstInt): - # if the arguments are directly constants, bypass the heapcache - # completely - val = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETARRAYITEM_GC_PURE_R, arraydescr, - arraybox, indexbox) - return executor.wrap_constant(val) - return self._do_getarrayitem_gc_any(rop.GETARRAYITEM_GC_PURE_R, - arraybox, indexbox, arraydescr) - @arguments("box", "box", "box", "descr") def _opimpl_setarrayitem_gc_any(self, arraybox, indexbox, itembox, arraydescr): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- 
a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1052,7 +1052,7 @@ 'ARRAYLEN_GC/1d/i', 'STRLEN/1/i', 'STRGETITEM/2/i', - 'GETARRAYITEM_GC_PURE/2d/rfi', + # 'GETARRAYITEM_GC_PURE/2d/rfi', 'UNICODELEN/1/i', 'UNICODEGETITEM/2/i', # @@ -1356,6 +1356,10 @@ opnum == rop.GETFIELD_GC_I or opnum == rop.GETFIELD_GC_R or opnum == rop.GETFIELD_GC_F or + opnum == rop.GETARRAYITEM_GC_I or + opnum == rop.GETARRAYITEM_GC_R or + opnum == rop.GETARRAYITEM_GC_F or + opnum == rop.ARRAYLEN_GC or opnum == rop.GETARRAYITEM_RAW_I or opnum == rop.GETARRAYITEM_RAW_F): return descr.is_always_pure() From pypy.commits at gmail.com Sat Apr 2 16:11:25 2016 From: pypy.commits at gmail.com (sbauman) Date: Sat, 02 Apr 2016 13:11:25 -0700 (PDT) Subject: [pypy-commit] pypy remove-getarrayitem-pure: Make some of the tests work Message-ID: <5700276d.034cc20a.40b40.ffffe2ee@mx.google.com> Author: Spenser Bauman Branch: remove-getarrayitem-pure Changeset: r83511:e2e277a65e93 Date: 2016-04-02 16:06 -0400 http://bitbucket.org/pypy/pypy/changeset/e2e277a65e93/ Log: Make some of the tests work diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -554,13 +554,22 @@ def optimize_GETARRAYITEM_GC_I(self, op): arrayinfo = self.ensure_ptr_info_arg0(op) indexb = self.getintbound(op.getarg(1)) + arraydescr = op.getdescr() + + if (arraydescr.is_always_pure() and + self.get_constant_box(op.getarg(0)) is not None and + self.get_constant_box(op.getarg(1)) is not None): + resbox = self.optimizer.constant_fold(op) + self.optimizer.make_constant(op, resbox) + return + cf = None if indexb.is_constant(): index = indexb.getint() arrayinfo.getlenbound(None).make_gt_const(index) # use the cache on (arraydescr, index), which is a constant - cf = self.arrayitem_cache(op.getdescr(), index) - field = cf.getfield_from_cache(self, arrayinfo, 
op.getdescr()) + cf = self.arrayitem_cache(arraydescr, index) + field = cf.getfield_from_cache(self, arrayinfo, arraydescr) if field is not None: self.make_equal_to(op, field) return @@ -573,36 +582,13 @@ self.emit_operation(op) # then remember the result of reading the array item if cf is not None: - arrayinfo.setitem(op.getdescr(), indexb.getint(), + arrayinfo.setitem(arraydescr, indexb.getint(), self.get_box_replacement(op.getarg(0)), self.get_box_replacement(op), optheap=self, cf=cf) optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I - def optimize_GETARRAYITEM_GC_PURE_I(self, op): - arrayinfo = self.ensure_ptr_info_arg0(op) - indexb = self.getintbound(op.getarg(1)) - cf = None - if indexb.is_constant(): - index = indexb.getint() - arrayinfo.getlenbound(None).make_gt_const(index) - # use the cache on (arraydescr, index), which is a constant - cf = self.arrayitem_cache(op.getdescr(), index) - fieldvalue = cf.getfield_from_cache(self, arrayinfo, op.getdescr()) - if fieldvalue is not None: - self.make_equal_to(op, fieldvalue) - return - else: - # variable index, so make sure the lazy setarrayitems are done - self.force_lazy_setarrayitem(op.getdescr(), self.getintbound(op.getarg(1))) - # default case: produce the operation - self.make_nonnull(op.getarg(0)) - self.emit_operation(op) - - optimize_GETARRAYITEM_GC_PURE_R = optimize_GETARRAYITEM_GC_PURE_I - optimize_GETARRAYITEM_GC_PURE_F = optimize_GETARRAYITEM_GC_PURE_I - def optimize_SETARRAYITEM_GC(self, op): #opnum = OpHelpers.getarrayitem_pure_for_descr(op.getdescr()) #if self.has_pure_result(opnum, [op.getarg(0), op.getarg(1)], diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -757,24 +757,20 @@ check at all, but then we don't unroll in that case. 
""" opnum = op.getopnum() + descr = op.getdescr() cpu = self.cpu - if OpHelpers.is_pure_getfield(opnum, op.getdescr()): - fielddescr = op.getdescr() + if OpHelpers.is_pure_getfield(opnum, descr): ref = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_field(ref, fielddescr) + cpu.protect_speculative_field(ref, descr) return - elif (opnum == rop.GETARRAYITEM_GC_PURE_I or - opnum == rop.GETARRAYITEM_GC_PURE_R or - opnum == rop.GETARRAYITEM_GC_PURE_F or - opnum == rop.ARRAYLEN_GC): - arraydescr = op.getdescr() + elif OpHelpers.is_pure_getarrayitem(opnum, descr): array = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_array(array, arraydescr) + cpu.protect_speculative_array(array, descr) if opnum == rop.ARRAYLEN_GC: return - arraylength = cpu.bh_arraylen_gc(array, arraydescr) + arraylength = cpu.bh_arraylen_gc(array, descr) elif (opnum == rop.STRGETITEM or opnum == rop.STRLEN): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5403,15 +5403,15 @@ def test_getarrayitem_gc_pure_not_invalidated(self): ops = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) + i1 = getarrayitem_gc_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) - i2 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) + i2 = getarrayitem_gc_i(p0, 1, descr=arrayimmutdescr) escape_n(i2) jump(p0) """ expected = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) + i1 = getarrayitem_gc_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) escape_n(i1) jump(p0) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ 
b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7869,7 +7869,7 @@ def test_loopinvariant_getarrayitem_gc_pure(self): ops = """ [p9, i1] - i843 = getarrayitem_gc_pure_i(p9, i1, descr=arrayimmutdescr) + i843 = getarrayitem_gc_i(p9, i1, descr=arrayimmutdescr) call_n(i843, descr=nonwritedescr) jump(p9, i1) """ @@ -8873,7 +8873,7 @@ ptemp = new_with_vtable(descr=nodesize) setfield_gc(ptemp, p1, descr=nextdescr) p2 = getfield_gc_r(ptemp, descr=nextdescr) - ix = getarrayitem_gc_pure_i(p2, 0, descr=arrayimmutdescr) + ix = getarrayitem_gc_i(p2, 0, descr=arrayimmutdescr) pfoo = getfield_gc_r(ptemp, descr=nextdescr) guard_value(pfoo, ConstPtr(immutarray)) [] ifoo = int_add(ix, 13) @@ -8905,7 +8905,7 @@ def test_constant_float_pure(self): ops = """ [p0] - f0 = getarrayitem_gc_pure_f(p0, 3, descr=floatarrayimmutdescr) + f0 = getarrayitem_gc_f(p0, 3, descr=floatarrayimmutdescr) guard_value(f0, 1.03) [] jump(p0) """ @@ -9141,7 +9141,7 @@ [p0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getarrayitem_gc_pure_i(p0, 5, descr=arraydescr) + getarrayitem_gc_i(p0, 5, descr=arrayimmutdescr) i3 = int_sub(i1, 1) jump(NULL, i3) """ @@ -9152,7 +9152,7 @@ [p0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getarrayitem_gc_pure_i(p0, 5, descr=arraydescr) + getarrayitem_gc_i(p0, 5, descr=arrayimmutdescr) i3 = int_sub(i1, 1) jump(ConstPtr(myptr3), i3) """ @@ -9163,7 +9163,7 @@ [p0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getarrayitem_gc_pure_i(p0, 125, descr=arraydescr) + getarrayitem_gc_i(p0, 125, descr=arrayimmutdescr) i3 = int_sub(i1, 1) jump(ConstPtr(arrayref), i3) # too short, length < 126! """ @@ -9174,7 +9174,7 @@ [i0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getarrayitem_gc_pure_i(ConstPtr(arrayref), i0, descr=arraydescr) + getarrayitem_gc_i(ConstPtr(arrayref), i0, descr=arrayimmutdescr) i3 = int_sub(i1, 1) jump(125, i3) # arrayref is too short, length < 126! 
""" @@ -9185,7 +9185,7 @@ [i0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getarrayitem_gc_pure_i(ConstPtr(arrayref), i0, descr=arraydescr) + getarrayitem_gc_i(ConstPtr(arrayref), i0, descr=arrayimmutdescr) i3 = int_sub(i1, 1) jump(-1, i3) # cannot access array item -1! """ diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -286,12 +286,6 @@ optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I - # note: the following line does not mean that the two operations are - # completely equivalent, because GETARRAYITEM_GC_PURE is_always_pure(). - optimize_GETARRAYITEM_GC_PURE_I = optimize_GETARRAYITEM_GC_I - optimize_GETARRAYITEM_GC_PURE_R = optimize_GETARRAYITEM_GC_I - optimize_GETARRAYITEM_GC_PURE_F = optimize_GETARRAYITEM_GC_I - def optimize_SETARRAYITEM_GC(self, op): opinfo = self.getptrinfo(op.getarg(0)) if opinfo and opinfo.is_virtual(): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1281,14 +1281,6 @@ return rop.GETFIELD_GC_I @staticmethod - def getarrayitem_pure_for_descr(descr): - if descr.is_array_of_pointers(): - return rop.GETARRAYITEM_GC_PURE_R - elif descr.is_array_of_floats(): - return rop.GETARRAYITEM_GC_PURE_F - return rop.GETARRAYITEM_GC_PURE_I - - @staticmethod def getarrayitem_for_descr(descr): if descr.is_array_of_pointers(): return rop.GETARRAYITEM_GC_R @@ -1368,8 +1360,25 @@ @staticmethod def is_pure_getfield(opnum, descr): if (opnum == rop.GETFIELD_GC_I or - opnum == rop.GETFIELD_GC_F or - opnum == rop.GETFIELD_GC_R): + opnum == rop.GETFIELD_GC_R or + opnum == rop.GETFIELD_GC_F): + return descr is not None and descr.is_always_pure() + return False + + @staticmethod + def 
is_pure_getarrayitem(opnum, descr): + if (opnum == rop.GETARRAYITEM_GC_I or + opnum == rop.GETARRAYITEM_GC_R or + opnum == rop.GETARRAYITEM_GC_F): + return descr is not None and descr.is_always_pure() + return False + + @staticmethod + def is_pure_arrayref(opnum, descr): + if (opnum == rop.GETARRAYITEM_GC_I or + opnum == rop.GETARRAYITEM_GC_R or + opnum == rop.GETARRAYITEM_GC_F or + opnum == rop.ARRAYLEN_GC): return descr is not None and descr.is_always_pure() return False @@ -1401,10 +1410,7 @@ @staticmethod def is_getarrayitem(opnum): - return opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, - rop.GETARRAYITEM_GC_R, rop.GETARRAYITEM_GC_PURE_I, - rop.GETARRAYITEM_GC_PURE_F, - rop.GETARRAYITEM_GC_PURE_R) + return opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, rop.GETARRAYITEM_GC_R) @staticmethod def is_real_call(opnum): @@ -1721,10 +1727,6 @@ rop.GETARRAYITEM_RAW_F: rop.VEC_GETARRAYITEM_RAW_F, rop.GETARRAYITEM_GC_I: rop.VEC_GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F: rop.VEC_GETARRAYITEM_GC_F, - # note that there is no _PURE operation for vector operations. - # reason: currently we do not care if it is pure or not! 
- rop.GETARRAYITEM_GC_PURE_I: rop.VEC_GETARRAYITEM_GC_I, - rop.GETARRAYITEM_GC_PURE_F: rop.VEC_GETARRAYITEM_GC_F, rop.RAW_STORE: rop.VEC_RAW_STORE, rop.SETARRAYITEM_RAW: rop.VEC_SETARRAYITEM_RAW, rop.SETARRAYITEM_GC: rop.VEC_SETARRAYITEM_GC, From pypy.commits at gmail.com Sun Apr 3 04:43:15 2016 From: pypy.commits at gmail.com (fijal) Date: Sun, 03 Apr 2016 01:43:15 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: write a blog post Message-ID: <5700d7a3.0d3f1c0a.efece.5041@mx.google.com> Author: fijal Branch: extradoc Changeset: r5626:ac090253677d Date: 2016-04-03 09:33 +0100 http://bitbucket.org/pypy/extradoc/changeset/ac090253677d/ Log: write a blog post diff --git a/blog/draft/jit-leaner-frontend.rst b/blog/draft/jit-leaner-frontend.rst new file mode 100644 --- /dev/null +++ b/blog/draft/jit-leaner-frontend.rst @@ -0,0 +1,69 @@ +Next stage of PyPy warmup improvements +====================================== + +Hello everyone. + +I'm pleased to inform you that we've finished another round of +improvements to the warmup performance of PyPy. Before I go +into details, I'll recap achievements that we've made since we started +working on the warmup performance. I picked a random PyPy from November 2014 +(which is definitely before we started the warmup work) and compared it with +a recent one, after 5.0. The exact revisions are respectively ``ffce4c795283`` +and ``cfbb442ae368``. First let's compare `pure warmup benchmarks`_ that +can be found in our benchmarking suite. Out of those, +``pypy-graph-alloc-removal`` could have been improved in the meantime by +doing other work on PyPy, while the rest is purely focused on warmup. + +You can see how much your program spends in warmup running +``PYPYLOG=jit-summary:- pypy your-program.py`` under "tracing" and "backend" +fields.
+ ++---------------------------+------------+------------+---------+----------------+----------------+ +| benchmark | time - old | time - new | speedup | JIT time - old | JIT time - new | ++---------------------------+------------+------------+---------+----------------+----------------+ +| function_call | 1.86 | 1.42 | 1.3x | 1.12s | 0.57s | ++---------------------------+------------+------------+---------+----------------+----------------+ +| function_call2 | 5.17s | 2.73s | 1.9x | 4.2s | 1.6s | ++---------------------------+------------+------------+---------+----------------+----------------+ +| bridges | 2.77s | 2.07s | 1.3x | 1.5s | 0.8s | ++---------------------------+------------+------------+---------+----------------+----------------+ +| pypy-graph-alloc-removal | 2.06s | 1.65s | 1.25x | 1.25s | 0.79s | ++---------------------------+------------+------------+---------+----------------+----------------+ + +.. `pure warmup benchmarks`: https://bitbucket.org/pypy/benchmarks/src/59290b59a24e54057d4c694fa4f47e7879a347a0/warmup/?at=default + +As we can see, the overall warmup benchmarks got up to **90% faster** with +JIT time dropping by up to **2.5x**. We have more optimizations in the pipeline, +with an idea how to transfer some of the JIT gains into more of a total program +runtime by jitting earlier and more eager. + +Details of the last round of optimizations +------------------------------------------ + +Now the nitty gritty details - what did we actually do? I covered a lot of +warmup improvements in the past blog posts so I'm going to focus on +the last change, jit-leaner-frontend branch. The last change is simple, instead of using +pointers to store the "operations" object after tracing, we use a compact list of +16-bit integers (with 16bit pointers in between). On 64bit machine the wins are +tremendous - it's 4x more efficient to use 16bit pointers than full 64bit pointers. 
+Additionally those pointers have a much better defined lifespan, so we don't need to +bother tracking them by the GC, which also saves quite a bit of time. + +Now the change sounds simple, but the details in the underlying data mean that +everything in the JIT had to be changed, which took quite a bit of effort :-) + +Going into the future in the JIT front, we have an exciting set of optimizations, +ranging from faster loops through faster warmup to using better code generation +techniques and broadening the kind of program that PyPy speeds up. Stay tuned +for the updates. + +We would like to thank our commercial partners for making all of this possible. +The work has been performed by baroquesoftware.com and would not be possible +without support from people using PyPy in production. If your company uses +PyPy and wants it to do more or does not use PyPy but has performance problems +with the Python installation, feel free to get in touch with me; trust me, using +PyPy ends up being a lot cheaper than rewriting everything in Go :-) + +Best regards, +Maciej Fijalkowski + From pypy.commits at gmail.com Sun Apr 3 04:43:17 2016 From: pypy.commits at gmail.com (fijal) Date: Sun, 03 Apr 2016 01:43:17 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <5700d7a5.4e941c0a.71bca.46d9@mx.google.com> Author: fijal Branch: extradoc Changeset: r5627:cf47f49e413a Date: 2016-04-03 09:43 +0100 http://bitbucket.org/pypy/extradoc/changeset/cf47f49e413a/ Log: merge diff too long, truncating to 2000 out of 2393 lines diff --git a/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt b/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt @@ -0,0 +1,119 @@ +pypy's assembler backend + +input: linear sequence of instructions, called a "trace". + +A trace is a sequence of instructions in SSA form. Most instructions correspond to one or a few CPU-level instructions.
There are a few meta-instructions like `label` and debugging stuff. All branching is done with guards, which are instructions that check that a condition is true and exit the trace if not. A failing guard can have a new trace added to it later, called a "bridge". A patched guard becomes a direct `Jcond` instruction going to the bridge, with no indirection, no register spilling, etc. + +A trace ends with either a `return` or a `jump to label`. The target label is either inside the same trace, or in some older one. For historical reasons we call a "loop" a trace that is not a bridge. The machine code that we generate is organized as a forest of trees; the trunk of the tree is a "loop", and the branches are all bridges (branching off the trunk or off another branch). + +* every trunk or branch that ends in a `jump to label` can target a label from a different tree, too. + +* the whole process of assembling a loop or a branch is basically single-threaded, so no synchronization issue there (including to patch older generated instructions). + +* the generated assembler has got a "frame" in %rbp, which is actually not on the stack at all, but is a GC object (called a "jitframe"). Spilling goes there. + +* the guards are `Jcond` to a very small piece of generated code, which is basically pushing a couple of constants on the stack and then jumping to the general guard-recovery code. That code will save the registers into the jitframe and then exit the whole generated function. The caller of that generated function checks how it finished: if it finished by hitting a guard, then the caller is responsible for calling the "blackhole interpreter". This is the part of the front-end that recovers from failing guards and finishes running the frame (including, possibly, by jumping again into generated assembler). 
+ + +Details about the JITting process: + +* front-end and optimization pass +* rewrite (includes gc related transformation as well as simplifications) +* assembler generation + + +# Front-end and optimization pass + +Not discussed here in detail. This produces loops and bridges using an instruction set that is "high-level" in some sense: it contains instructions like "new"/"new_array", and "setfield"/"setarrayitem"/"setinteriorfield" which describe the action of storing a value in a precise field of the structure or array. For example, the "setfield" action might implicitly require a GC write barrier. This is the high-level trace that we send to the following step. + + +# Rewrite + +A mostly but not completely CPU-independent phase: lowers some instructions. For example, the variants of "new" are lowered to "malloc" and a few "gc_store": it bumps the pointer of the GC and then sets a few fields explicitly in the newly allocated structure. The "setfield" is replaced with a "cond_gc_wb_call" (conditional call to the write barrier) if needed, followed by a "gc_store". + +The "gc_store" instruction can be encoded in a single MOV assembler instruction, but is not as flexible as a MOV. The address is always specified as "some GC pointer + an offset". We don't have the notion of interior pointer for GC objects. + +A different instruction, "gc_store_indexed", offers additional operands, which can be mapped to a single MOV instruction using forms like `[rax+8*rcx+24]`. + +Some other complex instructions pass through to the backend, which must deal with them: for example, "card marking" in the GC. (Writing an object pointer inside an array would require walking the whole array later to find "young" references. Instead of that, we flip a bit for every range of 128 entries. This is a common GC optimization.)
Setting the card bit of a GC object requires a sequence of assembler instructions that depends too much on the target CPU to be expressed explicitly here (moreover, it contains a few branches, which are hard to express at this level). + + + + +# Assembly + +No fancy code generation technique, but greedy forward pass that tries to avoid some pitfalls + + +## Handling instructions + +* One by one (forward direction). Each instruction asks the register allocator to ensure that some arguments are in registers (not in the jitframe); asks for a register to put its result into; and asks for additional scratch registers that will be freed at the end of the instruction. There is a special case for boolean variables: they are stored in the condition code flags instead of being materialized as a 0/1 value. (They are materialized later, except in the common case where they are only used by the next `guard_false` or `guard_true` and then forgotten.) + +* Instruction arguments are loaded into a register on demand. This makes the backend quite easy to write, but leads to some bad decisions. + + +## Linear scan register allocation + +Although it's always a linear trace that we consider, we don't use advanced techniques for register allocation: we do forward, on-demand allocation as the backend produces the assembler. When it asks for a register to put some value into, we give it any free register, without consideration for what will be done with it later. We compute the longevity of all variables, but only use it when choosing which register to spill (we spill the variable with the longest longevity). + +This works to some extent because it is well integrated with the earlier optimization pass. Loops are unrolled once by the optimization pass to allow more powerful optimizations---the optimization pass itself is the place that benefits the most, but it also has benefits here in the assembly pass. These are: + +* The first peeling initializes the register binding on the first use.
+* This leads to an already allocated register of the trace loop. +* As well as allocated registers when exiting bridges + +[Try to better allocate registers to match the ABI (minor to non benefit in the current state)] + + +## More complex mappings + +Some instructions generate more complex code. These are either or both of: + +* complex instructions generating some local control flow, like "cond_gc_wb_call" (for write barriers), "call_assembler" (a call followed by a few checks). + +* instructions that invoke custom assembler helpers, like the slow-path of write barriers or the slow-path of allocations. These slow-paths are typically generated too, so that we are not constrained by the usual calling conventions. + + +## GC pointers + +Around most CALL instructions, we need to record a description of where the GC pointers are (registers and stack frame). This is needed in case the CALL invokes a garbage collection. The GC pointers can move; the positions in the registers and stack frame are fixed by the GC. That's a reason for why we don't have explicit interior pointers. + +GC pointers can appear as constants in the trace. We are busy changing that to use a constant table and MOV REG, (%RIP+offset). The "constant" table can actually change if the GC objects move. + + +## Vectorization + +Optimization developed to use SIMD instructions for trace loops. Primary idea was to use it as an optimization of micro numpy. It has several passes on the already optimized trace. + +Shortly explained: It builds dependencies for an unrolled trace loop, gathering pairs/packs of operations that could be executed in parallel and finally schedules the operations. 
+ +What did it add to the code base: + +* Dependencies can be constructed +* Code motion of guards to relax dependencies +* Scheduler to reorder trace +* Array bound check removal (especially for unrolled traces) + +What can it do: + +* Transform vector loops (element wise operations) +* Accumulation (reduce([...],operator,0)). Requires Operation to be associative and commutative +* SSE 4.1 as ``vector backend'' + +## We do not + +* Keep tracing data around to reoptimize the trace tree. (Once a trace is compiled, minimal data is kept.) This is one reason (there are others in the front-end) for the following result: JIT-compiling a small loop with two common paths ends up as one "loop" and one bridge assembled, and the bridge-following path is slightly less efficient. This is notably because this bridge is assembled with two constraints: the input registers are fixed (from the guard), and the output registers are fixed (from the jump target); usually these two sets of fixed registers are different, and copying around is needed. + +* We don't join trace tails: we only assemble *trees*. + +* We don't do any reordering (neither of trace instructions nor of individual assembler instructions) + +* We don't do any cross-instruction optimization that makes sense only for the backend and can't easily be expressed at a higher level. I'm sure there are tons of examples of that, but e.g. loading a large constant in a register that will survive for several instructions; moving out of loops *parts* of some instruction like the address calculation; etc. etc. + +* Other optimization opportunities I can think about: look at the function prologue/epilogue; look at the overhead (small but not zero) at the start of a bridge. Also check if the way guards are implemented makes sense. Also, we generate large-ish sequences of assembler instructions with tons of `Jcond` that are almost never followed; any optimization opportunity there? (They all go forward, if it changes anything.) 
In theory we could also replace some of these with a signal handler on segfault (e.g. `guard_nonnull_class`). + + +# a GCC or LLVM backend? + +At least for comparison we'd like a JIT backend that emits its code using GCC or LLVM (irrespective of the time it would take). But it's hard to map reasonably well the guards to the C language or to LLVM IR. The problems are: (1) we have many guards, we would like to avoid having many paths that each do a full saving-all-local-variables-that-are-still-alive; (2) it's hard to patch a guard when a bridge is compiled from it; (3) instructions like a CALL need to expose the local variables that are GC pointers; CALL_MAY_FORCE need to expose *all* local variables for optional off-line reconstruction of the interpreter state. + diff --git a/talk/bucharest2016/jit-frontend/Makefile b/talk/bucharest2016/jit-frontend/Makefile new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/Makefile @@ -0,0 +1,32 @@ +# you can find rst2beamer.py and inkscapeslide.py here: +# http://bitbucket.org/antocuni/env/src/619f486c4fad/bin/rst2beamer.py +# http://bitbucket.org/antocuni/env/src/619f486c4fad/bin/inkscapeslide.py + + +talk.pdf: talk.rst author.latex stylesheet.latex diagrams/tracing-phases-p0.pdf diagrams/architecture-p0.pdf diagrams/pypytrace-p0.pdf + rst2beamer.py --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit + sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit + #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit + pdflatex talk.latex || exit + +view: talk.pdf + evince talk.pdf & + +xpdf: talk.pdf + xpdf talk.pdf & + + +diagrams/tracing-phases-p0.pdf: diagrams/tracing-phases.svg + cd diagrams && inkscapeslide.py tracing-phases.svg + +# diagrams/trace-p0.pdf: diagrams/trace.svg +# cd diagrams && inkscapeslide.py trace.svg + +# diagrams/tracetree-p0.pdf: diagrams/tracetree.svg +# cd diagrams && inkscapeslide.py tracetree.svg + +diagrams/architecture-p0.pdf: 
diagrams/architecture.svg + cd diagrams && inkscapeslide.py architecture.svg + +diagrams/pypytrace-p0.pdf: diagrams/pypytrace.svg + cd diagrams && inkscapeslide.py pypytrace.svg diff --git a/talk/bucharest2016/jit-frontend/author.latex b/talk/bucharest2016/jit-frontend/author.latex new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/author.latex @@ -0,0 +1,8 @@ +\definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} + +\title[PyPy Intro]{PyPy Intro and JIT Frontend} +\author[antocuni] +{Antonio Cuni} + +\institute{Intel at Bucharest} +\date{April 4 2016} diff --git a/talk/bucharest2016/jit-frontend/beamerdefs.txt b/talk/bucharest2016/jit-frontend/beamerdefs.txt new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/beamerdefs.txt @@ -0,0 +1,108 @@ +.. colors +.. =========================== + +.. role:: green +.. role:: red + + +.. general useful commands +.. =========================== + +.. |pause| raw:: latex + + \pause + +.. |small| raw:: latex + + {\small + +.. |end_small| raw:: latex + + } + +.. |scriptsize| raw:: latex + + {\scriptsize + +.. |end_scriptsize| raw:: latex + + } + +.. |strike<| raw:: latex + + \sout{ + +.. closed bracket +.. =========================== + +.. |>| raw:: latex + + } + + +.. example block +.. =========================== + +.. |example<| raw:: latex + + \begin{exampleblock}{ + + +.. |end_example| raw:: latex + + \end{exampleblock} + + + +.. alert block +.. =========================== + +.. |alert<| raw:: latex + + \begin{alertblock}{ + + +.. |end_alert| raw:: latex + + \end{alertblock} + + + +.. columns +.. =========================== + +.. |column1| raw:: latex + + \begin{columns} + \begin{column}{0.45\textwidth} + +.. |column2| raw:: latex + + \end{column} + \begin{column}{0.45\textwidth} + + +.. |end_columns| raw:: latex + + \end{column} + \end{columns} + + + +.. |snake| image:: ../../img/py-web-new.png + :scale: 15% + + + +.. nested blocks +.. =========================== + +.. 
|nested| raw:: latex + + \begin{columns} + \begin{column}{0.85\textwidth} + +.. |end_nested| raw:: latex + + \end{column} + \end{columns} diff --git a/talk/bucharest2016/jit-frontend/diagrams/architecture.svg b/talk/bucharest2016/jit-frontend/diagrams/architecture.svg new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/diagrams/architecture.svg @@ -0,0 +1,700 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + rpython+codewriter+jitcode+timeline+metatracer+optimizer+backend+jitted + + + + + + def LOAD_GLOBAL(self): ... + + + + def STORE_FAST(self): ... + + + + def BINARY_ADD(self): ... + + + + + RPYTHON + + + + CODEWRITER + + + + + + + + + ...p0 = getfield_gc(p0, 'func_globals')p2 = getfield_gc(p1, 'strval')call(dict_lookup, p0, p2).... + + + + + + ...p0 = getfield_gc(p0, 'locals_w')setarrayitem_gc(p0, i0, p1).... + + + + + ...promote_class(p0)i0 = getfield_gc(p0, 'intval')promote_class(p1)i1 = getfield_gc(p1, 'intval')i2 = int_add(i0, i1)if (overflowed) goto ...p2 = new_with_vtable('W_IntObject')setfield_gc(p2, i2, 'intval').... + + + + + + + + + JITCODE + + + + compile-time + runtime + + + META-TRACER + + + + + OPTIMIZER + + + + + BACKEND + + + + + ASSEMBLER + + + + diff --git a/talk/bucharest2016/jit-frontend/diagrams/pypytrace.svg b/talk/bucharest2016/jit-frontend/diagrams/pypytrace.svg new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/diagrams/pypytrace.svg @@ -0,0 +1,346 @@ + + + + + + + + + + image/svg+xml + + + + + + + python+dis+trace0+trace1+trace2+trace3 + + + def fn(): c = a+b ... + + + LOAD_GLOBAL ALOAD_GLOBAL BBINARY_ADDSTORE_FAST C + + + + ...p0 = getfield_gc(p0, 'func_globals')p2 = getfield_gc(p1, 'strval')call(dict_lookup, p0, p2)... + + + + ...p0 = getfield_gc(p0, 'func_globals')p2 = getfield_gc(p1, 'strval')call(dict_lookup, p0, p2)... 
+ + + ...guard_class(p0, W_IntObject)i0 = getfield_gc(p0, 'intval')guard_class(p1, W_IntObject)i1 = getfield_gc(p1, 'intval')i2 = int_add(00, i1)guard_not_overflow()p2 = new_with_vtable('W_IntObject')setfield_gc(p2, i2, 'intval')... + + + ...p0 = getfield_gc(p0, 'locals_w')setarrayitem_gc(p0, i0, p1).... + + diff --git a/talk/bucharest2016/jit-frontend/diagrams/tracing-phases.svg b/talk/bucharest2016/jit-frontend/diagrams/tracing-phases.svg new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/diagrams/tracing-phases.svg @@ -0,0 +1,473 @@ + + + +image/svg+xmlinterp+tracing+compilation+running+cold_guard+compiled_loop+bridge+hot_guard + +Interpretation + + + +Tracing + + +hotloopdetected + + +Compilation + + +Running + + +coldguardfailed + + +enteringcompiledloop + + +guardfailure + + + + + +hot + + +hotguardfailed + + + \ No newline at end of file diff --git a/talk/bucharest2016/jit-frontend/stylesheet.latex b/talk/bucharest2016/jit-frontend/stylesheet.latex new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/stylesheet.latex @@ -0,0 +1,11 @@ +\usetheme{Boadilla} +\usecolortheme{whale} +\setbeamercovered{transparent} +\setbeamertemplate{navigation symbols}{} + +\definecolor{darkgreen}{rgb}{0, 0.5, 0.0} +\newcommand{\docutilsrolegreen}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\docutilsrolered}[1]{\color{red}#1\normalcolor} + +\newcommand{\green}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\red}[1]{\color{red}#1\normalcolor} diff --git a/talk/bucharest2016/jit-frontend/talk.pdf b/talk/bucharest2016/jit-frontend/talk.pdf new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4ad8022637420b08f8a93bc24ddfd00d4b13adb2 GIT binary patch [cut] diff --git a/talk/bucharest2016/jit-frontend/talk.rst b/talk/bucharest2016/jit-frontend/talk.rst new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/talk.rst @@ -0,0 +1,544 @@ +.. 
include:: beamerdefs.txt + +================================ +PyPy Intro and JIT Frontend +================================ + +About this talk +---------------- + +* What is PyPy? What is RPython? + +* Tracing JIT 101 + +* PyPy JIT frontend and optimizer + + - "how we manage to make things fast" + + +Part 1 +------- + +**PyPy introduction** + +What is PyPy? +-------------- + +* For most people, the final product: + +|scriptsize| + +.. sourcecode:: python + + $ pypy + Python 2.7.10 (173add34cdd2, Mar 15 2016, 23:00:19) + [PyPy 5.1.0-alpha0 with GCC 4.8.4] on linux2 + >>>> import test.pystone + >>>> test.pystone.main() + Pystone(1.1) time for 50000 passes = 0.0473992 + This machine benchmarks at 1.05487e+06 pystones/second + +|end_scriptsize| + +* More in general: a broader project, ecosystem and community + + +PyPy as a project +------------------ + +* ``rpython``: a fancy compiler + + - source code: "statically typed Python with type inference and metaprogramming" + + - fancy features: C-like performance, GC, meta-JIT + + - "like GCC" (it statically produces a binary) + + - you can run RPython programs on top of CPython (veeery slow, for + development only) + +|pause| + +* ``pypy``: a Python interpreter + + - "like CPython", but written in RPython + + - CPython : GCC = PyPy : RPython + + + +Important fact +--------------- + +* We **did not** write a JIT compiler for Python + +* The "meta JIT" works with all RPython programs + +* The "Python JIT" is automatically generated from the interpreter + +* Writing an interpreter is vastly easier than a compiler + +* Other interpreters: smalltalk, prolog, ruby, php, ... 
+ + +The final product +------------------ + +* ``rpython`` + ``pypy``: the final binary you download and execute + + - a Python interpreter + + - with a GC + + - with a JIT + + - fast + + + +Part 2 +------ + +**Overview of tracing JITs** + + +Assumptions +----------- + +* Pareto Principle (80-20 rule) + + - the 20% of the program accounts for the 80% of the runtime + + - **hot-spots** + +* Fast Path principle + + - optimize only what is necessary + + - fall back for uncommon cases + +|pause| + +* Most of runtime spent in **loops** + +* Always the same code paths (likely) + + +Tracing JIT +----------- + +* Interpret the program as usual + +* Detect **hot** loops + +* Tracing phase + + - **linear** trace + +* Compiling + +* Execute + + - guards to ensure correctness + +* Profit :-) + + +Tracing JIT phases +------------------- + +.. animage:: diagrams/tracing-phases-p*.pdf + :align: center From pypy.commits at gmail.com Sun Apr 3 18:21:27 2016 From: pypy.commits at gmail.com (antocuni) Date: Sun, 03 Apr 2016 15:21:27 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: write the slides about trace trees Message-ID: <57019767.4577c20a.208c2.ffff8a81@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5628:b7ac115da338 Date: 2016-04-03 16:54 +0200 http://bitbucket.org/pypy/extradoc/changeset/b7ac115da338/ Log: write the slides about trace trees diff --git a/talk/bucharest2016/jit-frontend/Makefile b/talk/bucharest2016/jit-frontend/Makefile --- a/talk/bucharest2016/jit-frontend/Makefile +++ b/talk/bucharest2016/jit-frontend/Makefile @@ -3,7 +3,7 @@ # http://bitbucket.org/antocuni/env/src/619f486c4fad/bin/inkscapeslide.py -talk.pdf: talk.rst author.latex stylesheet.latex diagrams/tracing-phases-p0.pdf diagrams/architecture-p0.pdf diagrams/pypytrace-p0.pdf +talk.pdf: talk.rst author.latex stylesheet.latex diagrams/tracing-phases-p0.pdf diagrams/architecture-p0.pdf diagrams/pypytrace-p0.pdf diagrams/tracetree-p0.pdf rst2beamer.py --stylesheet=stylesheet.latex 
--documentoptions=14pt talk.rst talk.latex || exit sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit @@ -22,8 +22,8 @@ # diagrams/trace-p0.pdf: diagrams/trace.svg # cd diagrams && inkscapeslide.py trace.svg -# diagrams/tracetree-p0.pdf: diagrams/tracetree.svg -# cd diagrams && inkscapeslide.py tracetree.svg +diagrams/tracetree-p0.pdf: diagrams/tracetree.svg + cd diagrams && inkscapeslide.py tracetree.svg diagrams/architecture-p0.pdf: diagrams/architecture.svg cd diagrams && inkscapeslide.py architecture.svg diff --git a/talk/bucharest2016/jit-frontend/diagrams/tracetree.svg b/talk/bucharest2016/jit-frontend/diagrams/tracetree.svg new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/diagrams/tracetree.svg @@ -0,0 +1,488 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + trace+looptrace, guard_sign+blackhole+interp+call_jittedtrace+guard_sign+bridge+loop2+loop + + + label(start, i0, a0)v0 = int_lt(i0, 2000)guard_true(v0)v1 = int_mod(i0, 2)v2 = int_eq(v1, 0)guard_true(v1)a1 = int_add(a0, 10)i1 = int_add(i0, 1)jump(start, i1, a1) + + + + + + + + a1 = int_mul(a0, 2)i1 = int_add(i0, 1)jump(start, i1, a1) + + HOT FAIL + + + + + diff --git a/talk/bucharest2016/jit-frontend/talk.rst b/talk/bucharest2016/jit-frontend/talk.rst --- a/talk/bucharest2016/jit-frontend/talk.rst +++ b/talk/bucharest2016/jit-frontend/talk.rst @@ -152,10 +152,38 @@ :scale: 100% -Trace trees ------------ +Trace trees (1) +--------------- -WRITE ME +|scriptsize| +|example<| |small| tracetree.py |end_small| |>| + +.. sourcecode:: python + + def foo(): + a = 0 + i = 0 + N = 100 + while i < N: + if i%2 == 0: + a += 1 + else: + a *= 2; + i += 1 + return a + +|end_example| +|end_scriptsize| + + +Trace trees (2) +--------------- + +.. 
animage:: diagrams/tracetree-p*.pdf + :align: center + :scale: 30% + + Part 3 ------ From pypy.commits at gmail.com Sun Apr 3 18:21:29 2016 From: pypy.commits at gmail.com (antocuni) Date: Sun, 03 Apr 2016 15:21:29 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: regenerate pdf Message-ID: <57019769.c1621c0a.dd98f.466a@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5629:bb172b3eaf26 Date: 2016-04-04 00:20 +0200 http://bitbucket.org/pypy/extradoc/changeset/bb172b3eaf26/ Log: regenerate pdf diff --git a/talk/bucharest2016/jit-frontend/talk.pdf b/talk/bucharest2016/jit-frontend/talk.pdf index 4ad8022637420b08f8a93bc24ddfd00d4b13adb2..c74ff0a9eb5d7e988a9424ae7c94708182ad5a1b GIT binary patch [cut] From pypy.commits at gmail.com Sun Apr 3 18:25:21 2016 From: pypy.commits at gmail.com (antocuni) Date: Sun, 03 Apr 2016 15:25:21 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: add an XXX Message-ID: <57019851.0113c20a.5044a.ffff855e@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5630:8de7beaf20d3 Date: 2016-04-04 00:25 +0200 http://bitbucket.org/pypy/extradoc/changeset/8de7beaf20d3/ Log: add an XXX diff --git a/blog/draft/jit-leaner-frontend.rst b/blog/draft/jit-leaner-frontend.rst --- a/blog/draft/jit-leaner-frontend.rst +++ b/blog/draft/jit-leaner-frontend.rst @@ -46,6 +46,7 @@ pointers to store the "operations" object after tracing, we use a compact list of 16-bit integers (with 16bit pointers in between). On 64bit machine the wins are tremendous - it's 4x more efficient to use 16bit pointers than full 64bit pointers. +.. XXX: I assume you are talking about "memory efficiency": we should be clearer Additionally those pointers have a much better defined lifespan, so we don't need to bother tracking them by the GC, which also saves quite a bit of time. 
From pypy.commits at gmail.com Mon Apr 4 02:52:43 2016 From: pypy.commits at gmail.com (fijal) Date: Sun, 03 Apr 2016 23:52:43 -0700 (PDT) Subject: [pypy-commit] pypy default: we use enum34 not enum Message-ID: <57020f3b.04c31c0a.75891.ffffb7d0@mx.google.com> Author: fijal Branch: Changeset: r83512:a7e2b12ac538 Date: 2016-04-04 09:51 +0300 http://bitbucket.org/pypy/pypy/changeset/a7e2b12ac538/ Log: we use enum34 not enum diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ # hypothesis is used for test generation on untranslated jit tests hypothesis -enum>=0.4.6 # is a dependency, but old pip does not pick it up enum34>=1.1.2 From pypy.commits at gmail.com Mon Apr 4 14:55:59 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 04 Apr 2016 11:55:59 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Allow specifying path as file descriptor in os.listdir() Message-ID: <5702b8bf.85b01c0a.f20c8.ffffe069@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83513:c6615ca37d32 Date: 2016-04-04 19:55 +0100 http://bitbucket.org/pypy/pypy/changeset/c6615ca37d32/ Log: Allow specifying path as file descriptor in os.listdir() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -744,35 +744,56 @@ raise wrap_oserror(space, e) - at unwrap_spec(w_dirname=WrappedDefault(u".")) -def listdir(space, w_dirname): - """Return a list containing the names of the entries in the directory. + at unwrap_spec(w_path=WrappedDefault(u".")) +def listdir(space, w_path): + """listdir(path='.') -> list_of_filenames -\tpath: path of directory to list +Return a list containing the names of the files in the directory. +The list is in arbitrary order. It does not include the special +entries '.' and '..' even if they are present in the directory. -The list is in arbitrary order. 
It does not include the special -entries '.' and '..' even if they are present in the directory.""" +path can be specified as either str or bytes. If path is bytes, + the filenames returned will also be bytes; in all other circumstances + the filenames returned will be str. +On some platforms, path may also be specified as an open file descriptor; + the file descriptor must refer to a directory. + If this functionality is unavailable, using it raises NotImplementedError.""" + if space.isinstance_w(w_path, space.w_bytes): + dirname = space.str0_w(w_path) + try: + result = rposix.listdir(dirname) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + return space.newlist_bytes(result) try: - if space.isinstance_w(w_dirname, space.w_unicode): - dirname = FileEncoder(space, w_dirname) + path = space.fsencode_w(w_path) + except OperationError as operr: + if not rposix.HAVE_FDOPENDIR: + raise oefmt(space.w_TypeError, + "listdir: illegal type for path argument") + if not space.isinstance_w(w_path, space.w_int): + raise oefmt(space.w_TypeError, + "argument should be string, bytes or integer, not %T", w_path) + fd = unwrap_fd(space, w_path) + try: + result = rposix.fdlistdir(fd) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + else: + dirname = FileEncoder(space, w_path) + try: result = rposix.listdir(dirname) - len_result = len(result) - result_w = [None] * len_result - for i in range(len_result): - if _WIN32: - result_w[i] = space.wrap(result[i]) - else: - w_bytes = space.wrapbytes(result[i]) - result_w[i] = space.fsdecode(w_bytes) - return space.newlist(result_w) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + len_result = len(result) + result_w = [None] * len_result + for i in range(len_result): + if _WIN32: + result_w[i] = space.wrap(result[i]) else: - dirname = space.str0_w(w_dirname) - result = rposix.listdir(dirname) - # The list comprehension is a workaround for an obscure translation - # bug. 
- return space.newlist_bytes([x for x in result]) - except OSError, e: - raise wrap_oserror2(space, e, w_dirname) + w_bytes = space.wrapbytes(result[i]) + result_w[i] = space.fsdecode(w_bytes) + return space.newlist(result_w) def pipe(space): "Create a pipe. Returns (read_end, write_end)." From pypy.commits at gmail.com Mon Apr 4 19:46:22 2016 From: pypy.commits at gmail.com (stefanor) Date: Mon, 04 Apr 2016 16:46:22 -0700 (PDT) Subject: [pypy-commit] pypy default: Skip PTY tests that hang forever on kFreeBSD Message-ID: <5702fcce.6614c20a.1d71f.6dd9@mx.google.com> Author: Stefano Rivera Branch: Changeset: r83514:9059c53718bb Date: 2016-04-04 16:40 -0700 http://bitbucket.org/pypy/pypy/changeset/9059c53718bb/ Log: Skip PTY tests that hang forever on kFreeBSD I don't know if this is expected behaviour. These all look skipped on darwin, which is presumably similar to FreeBSD, here. https://bugs.debian.org/742965 also seems relevant diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -285,6 +285,8 @@ from posix import openpty, fdopen, write, close except ImportError: skip('no openpty on this platform') + if 'gnukfreebsd' in sys.platform: + skip('close() hangs forever on kFreeBSD') read_fd, write_fd = openpty() write(write_fd, 'Abc\n') close(write_fd) diff --git a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py --- a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py @@ -46,7 +46,8 @@ read_spec(spec, HistoricalTestReader) - at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") + at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") def test_signal_failure(monkeypatch): import os import pty diff --git a/pypy/module/test_lib_pypy/pyrepl/test_readline.py 
b/pypy/module/test_lib_pypy/pyrepl/test_readline.py --- a/pypy/module/test_lib_pypy/pyrepl/test_readline.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_readline.py @@ -1,7 +1,8 @@ import pytest - at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") + at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") def test_raw_input(): import os import pty From pypy.commits at gmail.com Mon Apr 4 19:46:24 2016 From: pypy.commits at gmail.com (stefanor) Date: Mon, 04 Apr 2016 16:46:24 -0700 (PDT) Subject: [pypy-commit] pypy default: Pretend to be using a terminal that supports clear Message-ID: <5702fcd0.2457c20a.b8b0d.68c7@mx.google.com> Author: Stefano Rivera Branch: Changeset: r83515:accad9a1fe9c Date: 2016-04-04 16:45 -0700 http://bitbucket.org/pypy/pypy/changeset/accad9a1fe9c/ Log: Pretend to be using a terminal that supports clear Some tests use readline on an internal pty. These fail if TERM is not set to a terminal that supports "clear". diff --git a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py b/pypy/module/test_lib_pypy/pyrepl/infrastructure.py --- a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py +++ b/pypy/module/test_lib_pypy/pyrepl/infrastructure.py @@ -18,6 +18,9 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
from __future__ import print_function +from contextlib import contextmanager +import os + from pyrepl.reader import Reader from pyrepl.console import Console, Event @@ -71,3 +74,14 @@ con = TestConsole(test_spec, verbose=True) reader = reader_class(con) reader.readline() + + + at contextmanager +def sane_term(): + """Ensure a TERM that supports clear""" + old_term, os.environ['TERM'] = os.environ.get('TERM'), 'xterm' + yield + if old_term is not None: + os.environ['TERM'] = old_term + else: + del os.environ['TERM'] diff --git a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py --- a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py @@ -18,7 +18,7 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from pyrepl.historical_reader import HistoricalReader -from .infrastructure import EA, BaseTestReader, read_spec +from .infrastructure import EA, BaseTestReader, sane_term, read_spec # this test case should contain as-verbatim-as-possible versions of # (applicable) bug reports @@ -62,13 +62,14 @@ mfd, sfd = pty.openpty() try: - c = UnixConsole(sfd, sfd) - c.prepare() - c.restore() - monkeypatch.setattr(signal, 'signal', failing_signal) - c.prepare() - monkeypatch.setattr(signal, 'signal', really_failing_signal) - c.restore() + with sane_term(): + c = UnixConsole(sfd, sfd) + c.prepare() + c.restore() + monkeypatch.setattr(signal, 'signal', failing_signal) + c.prepare() + monkeypatch.setattr(signal, 'signal', really_failing_signal) + c.restore() finally: os.close(mfd) os.close(sfd) diff --git a/pypy/module/test_lib_pypy/pyrepl/test_readline.py b/pypy/module/test_lib_pypy/pyrepl/test_readline.py --- a/pypy/module/test_lib_pypy/pyrepl/test_readline.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_readline.py @@ -1,5 +1,7 @@ import pytest +from .infrastructure import sane_term + @pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " "'kfreebsd' in 
sys.platform") @@ -12,7 +14,8 @@ readline_wrapper = _ReadlineWrapper(slave, slave) os.write(master, b'input\n') - result = readline_wrapper.get_reader().readline() + with sane_term(): + result = readline_wrapper.get_reader().readline() #result = readline_wrapper.raw_input('prompt:') assert result == 'input' # A bytes string on python2, a unicode string on python3. From pypy.commits at gmail.com Tue Apr 5 04:02:10 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Apr 2016 01:02:10 -0700 (PDT) Subject: [pypy-commit] pypy default: (fijal, arigo) Message-ID: <57037102.c856c20a.08e1.ffffd793@mx.google.com> Author: Armin Rigo Branch: Changeset: r83516:7c00ff49e3ee Date: 2016-04-05 12:01 +0300 http://bitbucket.org/pypy/pypy/changeset/7c00ff49e3ee/ Log: (fijal, arigo) test and fix: unpack() on a Trace which contains half-written operations (here because it got a FrontendTagOverflow) diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -141,6 +141,8 @@ return self.pos >= self.end def _next(self): + if self.done(): + raise IndexError res = rffi.cast(lltype.Signed, self.trace._ops[self.pos]) self.pos += 1 return res @@ -483,8 +485,11 @@ def unpack(self): iter = self.get_iter() ops = [] - while not iter.done(): - ops.append(iter.next()) + try: + while True: + ops.append(iter.next()) + except IndexError: + pass return iter.inputargs, ops def tag(kind, pos): diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -1,5 +1,6 @@ - +import py from rpython.jit.metainterp.opencoder import Trace, untag, TAGINT, TAGBOX +from rpython.jit.metainterp.opencoder import FrontendTagOverflow from rpython.jit.metainterp.resoperation import rop, AbstractResOp from rpython.jit.metainterp.history import ConstInt, 
IntFrontendOp from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer @@ -204,3 +205,9 @@ t.record_op(rop.ESCAPE_N, [ConstInt(3)]) t.record_op(rop.FINISH, [i4]) assert t.get_dead_ranges() == [0, 0, 0, 0, 0, 3, 4, 5] + + def test_tag_overflow(self): + t = Trace([], metainterp_sd) + i0 = FakeOp(100000) + py.test.raises(FrontendTagOverflow, t.record_op, rop.FINISH, [i0]) + assert t.unpack() == ([], []) From pypy.commits at gmail.com Tue Apr 5 05:57:27 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Apr 2016 02:57:27 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-history-rewriting: (fijal, arigo) Message-ID: <57038c07.c50b1c0a.31980.ffffd200@mx.google.com> Author: Armin Rigo Branch: cleanup-history-rewriting Changeset: r83517:a41ee91c5a0e Date: 2016-04-05 11:15 +0300 http://bitbucket.org/pypy/pypy/changeset/a41ee91c5a0e/ Log: (fijal, arigo) a branch to remove the occasional rewriting of history done by pyjitpl From pypy.commits at gmail.com Tue Apr 5 06:01:22 2016 From: pypy.commits at gmail.com (Raemi) Date: Tue, 05 Apr 2016 03:01:22 -0700 (PDT) Subject: [pypy-commit] pypy default: remove dependency on get_rpy_roots in vmprof and maintain a weaklist of Message-ID: <57038cf2.0d3f1c0a.efece.ffffd887@mx.google.com> Author: Remi Meier Branch: Changeset: r83518:cf086a5af05f Date: 2016-04-05 13:00 +0300 http://bitbucket.org/pypy/pypy/changeset/cf086a5af05f/ Log: remove dependency on get_rpy_roots in vmprof and maintain a weaklist of code objects instead diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -1,11 +1,12 @@ import sys, os from rpython.rlib.objectmodel import specialize, we_are_translated -from rpython.rlib import jit, rgc, rposix +from rpython.rlib import jit, rposix from rpython.rlib.rvmprof import cintf from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance from 
rpython.rtyper.lltypesystem import rffi, llmemory from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib.rweaklist import RWeakListMixin MAX_FUNC_NAME = 1023 @@ -35,7 +36,7 @@ self._cleanup_() self._code_unique_id = 4 self.cintf = cintf.setup() - + def _cleanup_(self): self.is_enabled = False @@ -55,6 +56,8 @@ self._code_unique_id = uid if self.is_enabled: self._write_code_registration(uid, full_name_func(code)) + else: + code._vmprof_weak_list.add_handle(code) def register_code_object_class(self, CodeClass, full_name_func): """NOT_RPYTHON @@ -80,14 +83,19 @@ CodeClass._vmprof_unique_id = 0 # default value: "unknown" self._code_classes.add(CodeClass) # - def try_cast_to_code(gcref): - return rgc.try_cast_gcref_to_instance(CodeClass, gcref) + class WeakCodeObjectList(RWeakListMixin): + def __init__(self): + self.initialize() + CodeClass._vmprof_weak_list = WeakCodeObjectList() # def gather_all_code_objs(): - all_code_objs = rgc.do_get_objects(try_cast_to_code) - for code in all_code_objs: - uid = code._vmprof_unique_id - if uid != 0: + all_code_wrefs = CodeClass._vmprof_weak_list.get_all_handles() + for wref in all_code_wrefs: + code = wref() + if code is not None: + uid = code._vmprof_unique_id + if uid != 0: + self._write_code_registration(uid, full_name_func(code)) self._write_code_registration(uid, full_name_func(code)) prev() # make a chained list of the gather() functions for all diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py b/rpython/rlib/rvmprof/test/test_rvmprof.py --- a/rpython/rlib/rvmprof/test/test_rvmprof.py +++ b/rpython/rlib/rvmprof/test/test_rvmprof.py @@ -80,7 +80,7 @@ return 0 assert f() == 0 - fn = compile(f, []) + fn = compile(f, [], gcpolicy="minimark") assert fn() == 0 From pypy.commits at gmail.com Tue Apr 5 08:19:26 2016 From: pypy.commits at gmail.com (Raemi) Date: Tue, 05 Apr 2016 05:19:26 -0700 (PDT) Subject: [pypy-commit] stmgc default: prefix some accidentially exposed functions Message-ID: 
<5703ad4e.29cec20a.0742.43d0@mx.google.com> Author: Remi Meier Branch: Changeset: r1984:b7f8a106095f Date: 2016-04-05 15:19 +0300 http://bitbucket.org/pypy/stmgc/changeset/b7f8a106095f/ Log: prefix some accidentially exposed functions diff --git a/c8/stm/atomic.h b/c8/stm/atomic.h --- a/c8/stm/atomic.h +++ b/c8/stm/atomic.h @@ -24,16 +24,16 @@ #if defined(__i386__) || defined(__amd64__) - static inline void spin_loop(void) { asm("pause" : : : "memory"); } - static inline void write_fence(void) { asm("" : : : "memory"); } + static inline void stm_spin_loop(void) { asm("pause" : : : "memory"); } + static inline void stm_write_fence(void) { asm("" : : : "memory"); } /*# define atomic_exchange(ptr, old, new) do { \ (old) = __sync_lock_test_and_set(ptr, new); \ } while (0)*/ #else - static inline void spin_loop(void) { asm("" : : : "memory"); } - static inline void write_fence(void) { __sync_synchronize(); } + static inline void stm_spin_loop(void) { asm("" : : : "memory"); } + static inline void stm_write_fence(void) { __sync_synchronize(); } /*# define atomic_exchange(ptr, old, new) do { \ (old) = *(ptr); \ @@ -42,19 +42,19 @@ #endif -static inline void _spinlock_acquire(uint8_t *plock) { +static inline void _stm_spinlock_acquire(uint8_t *plock) { retry: if (__builtin_expect(__sync_lock_test_and_set(plock, 1) != 0, 0)) { - spin_loop(); + stm_spin_loop(); goto retry; } } -static inline void _spinlock_release(uint8_t *plock) { +static inline void _stm_spinlock_release(uint8_t *plock) { assert(*plock == 1); __sync_lock_release(plock); } -#define spinlock_acquire(lock) _spinlock_acquire(&(lock)) -#define spinlock_release(lock) _spinlock_release(&(lock)) +#define stm_spinlock_acquire(lock) _stm_spinlock_acquire(&(lock)) +#define stm_spinlock_release(lock) _stm_spinlock_release(&(lock)) #endif /* _STM_ATOMIC_H */ diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1218,7 +1218,7 @@ /* but first, emit commit-event of this thread: */ 
timing_event(STM_SEGMENT->running_thread, STM_TRANSACTION_COMMIT); STM_SEGMENT->running_thread = NULL; - write_fence(); + stm_write_fence(); assert(_stm_detached_inevitable_from_thread == -1); _stm_detached_inevitable_from_thread = 0; } @@ -1540,7 +1540,7 @@ 0. We have to wait for this to happen bc. otherwise, eg. _stm_detach_inevitable_transaction is not safe to do yet */ while (_stm_detached_inevitable_from_thread == -1) - spin_loop(); + stm_spin_loop(); assert(_stm_detached_inevitable_from_thread == 0); soon_finished_or_inevitable_thread_segment(); diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -333,12 +333,12 @@ static inline void acquire_privatization_lock(int segnum) { - spinlock_acquire(get_priv_segment(segnum)->privatization_lock); + stm_spinlock_acquire(get_priv_segment(segnum)->privatization_lock); } static inline void release_privatization_lock(int segnum) { - spinlock_release(get_priv_segment(segnum)->privatization_lock); + stm_spinlock_release(get_priv_segment(segnum)->privatization_lock); } static inline bool all_privatization_locks_acquired(void) diff --git a/c8/stm/detach.c b/c8/stm/detach.c --- a/c8/stm/detach.c +++ b/c8/stm/detach.c @@ -107,7 +107,7 @@ is reset to a value different from -1 */ dprintf(("reattach_transaction: busy wait...\n")); while (_stm_detached_inevitable_from_thread == -1) - spin_loop(); + stm_spin_loop(); /* then retry */ goto restart; @@ -157,7 +157,7 @@ /* busy-loop: wait until _stm_detached_inevitable_from_thread is reset to a value different from -1 */ while (_stm_detached_inevitable_from_thread == -1) - spin_loop(); + stm_spin_loop(); goto restart; } if (!__sync_bool_compare_and_swap(&_stm_detached_inevitable_from_thread, @@ -209,7 +209,7 @@ /* busy-loop: wait until _stm_detached_inevitable_from_thread is reset to a value different from -1 */ while (_stm_detached_inevitable_from_thread == -1) - spin_loop(); + stm_spin_loop(); goto restart; } } diff --git a/c8/stm/finalizer.c 
b/c8/stm/finalizer.c --- a/c8/stm/finalizer.c +++ b/c8/stm/finalizer.c @@ -30,7 +30,7 @@ { /* move finalizer lists to g_finalizers for major collections */ while (__sync_lock_test_and_set(&g_finalizers.lock, 1) != 0) { - spin_loop(); + stm_spin_loop(); } if (STM_PSEGMENT->finalizers->run_finalizers != NULL) { @@ -515,7 +515,7 @@ while (__sync_lock_test_and_set(&g_finalizers.lock, 1) != 0) { /* somebody is adding more finalizers (_commit_finalizer()) */ - spin_loop(); + stm_spin_loop(); } struct finalizers_s copy = g_finalizers; assert(copy.running_next == NULL); diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -75,7 +75,7 @@ /* uncommon case: need to initialize some more pages */ - spinlock_acquire(lock_growth_large); + stm_spinlock_acquire(lock_growth_large); char *start = uninitialized_page_start; if (addr + size > start) { @@ -99,7 +99,7 @@ ((struct object_s*)addr)->stm_flags = 0; - spinlock_release(lock_growth_large); + stm_spinlock_release(lock_growth_large); return (stm_char*)(addr - stm_object_pages); } @@ -178,7 +178,7 @@ DEBUG_EXPECT_SEGFAULT(true); release_privatization_lock(STM_SEGMENT->segment_num); - write_fence(); /* make sure 'nobj' is fully initialized from + stm_write_fence(); /* make sure 'nobj' is fully initialized from all threads here */ return (object_t *)nobj; } diff --git a/c8/stm/hashtable.c b/c8/stm/hashtable.c --- a/c8/stm/hashtable.c +++ b/c8/stm/hashtable.c @@ -216,7 +216,7 @@ } biggertable->resize_counter = rc; - write_fence(); /* make sure that 'biggertable' is valid here, + stm_write_fence(); /* make sure that 'biggertable' is valid here, and make sure 'table->resize_counter' is updated ('table' must be immutable from now on). */ VOLATILE_HASHTABLE(hashtable)->table = biggertable; @@ -278,7 +278,7 @@ just now. In both cases, this thread must simply spin loop. */ if (IS_EVEN(rc)) { - spin_loop(); + stm_spin_loop(); goto restart; } /* in the other cases, we need to grab the RESIZING_LOCK. 
@@ -348,7 +348,7 @@ hashtable->additions++; } table->items[i] = entry; - write_fence(); /* make sure 'table->items' is written here */ + stm_write_fence(); /* make sure 'table->items' is written here */ VOLATILE_TABLE(table)->resize_counter = rc - 6; /* unlock */ stm_read((object_t*)entry); return entry; @@ -437,7 +437,7 @@ table = VOLATILE_HASHTABLE(hashtable)->table; rc = VOLATILE_TABLE(table)->resize_counter; if (IS_EVEN(rc)) { - spin_loop(); + stm_spin_loop(); goto restart; } diff --git a/c8/stm/largemalloc.c b/c8/stm/largemalloc.c --- a/c8/stm/largemalloc.c +++ b/c8/stm/largemalloc.c @@ -116,12 +116,12 @@ static void lm_lock(void) { - spinlock_acquire(lm.lock); + stm_spinlock_acquire(lm.lock); } static void lm_unlock(void) { - spinlock_release(lm.lock); + stm_spinlock_release(lm.lock); } diff --git a/c8/stm/queue.c b/c8/stm/queue.c --- a/c8/stm/queue.c +++ b/c8/stm/queue.c @@ -77,7 +77,7 @@ stm_queue_segment_t *seg = &queue->segs[i]; struct stm_priv_segment_info_s *pseg = get_priv_segment(i + 1); - spinlock_acquire(pseg->active_queues_lock); + stm_spinlock_acquire(pseg->active_queues_lock); if (seg->active) { assert(pseg->active_queues != NULL); @@ -91,7 +91,7 @@ assert(!seg->old_objects_popped); } - spinlock_release(pseg->active_queues_lock); + stm_spinlock_release(pseg->active_queues_lock); queue_free_entries(seg->added_in_this_transaction); queue_free_entries(seg->old_objects_popped); @@ -102,12 +102,12 @@ static inline void queue_lock_acquire(void) { int num = STM_SEGMENT->segment_num; - spinlock_acquire(get_priv_segment(num)->active_queues_lock); + stm_spinlock_acquire(get_priv_segment(num)->active_queues_lock); } static inline void queue_lock_release(void) { int num = STM_SEGMENT->segment_num; - spinlock_release(get_priv_segment(num)->active_queues_lock); + stm_spinlock_release(get_priv_segment(num)->active_queues_lock); } static void queue_activate(stm_queue_t *queue, stm_queue_segment_t *seg) @@ -133,7 +133,7 @@ #pragma push_macro("STM_SEGMENT") #undef 
STM_PSEGMENT #undef STM_SEGMENT - spinlock_acquire(pseg->active_queues_lock); + stm_spinlock_acquire(pseg->active_queues_lock); bool added_any_old_entries = false; bool finished_more_tasks = false; @@ -177,11 +177,11 @@ } dprintf(("items move to old_entries in queue %p\n", queue)); - spinlock_acquire(queue->old_entries_lock); + stm_spinlock_acquire(queue->old_entries_lock); old = queue->old_entries; tail->next = old; queue->old_entries = head; - spinlock_release(queue->old_entries_lock); + stm_spinlock_release(queue->old_entries_lock); added_any_old_entries = true; } @@ -196,7 +196,7 @@ tree_free(pseg->active_queues); pseg->active_queues = NULL; - spinlock_release(pseg->active_queues_lock); + stm_spinlock_release(pseg->active_queues_lock); if (added_any_old_entries) cond_broadcast(C_QUEUE_OLD_ENTRIES); @@ -267,11 +267,11 @@ can free and reuse this entry. Then the compare_and_swap succeeds, but the value written is outdated nonsense. */ - spinlock_acquire(queue->old_entries_lock); + stm_spinlock_acquire(queue->old_entries_lock); entry = queue->old_entries; if (entry != NULL) queue->old_entries = entry->next; - spinlock_release(queue->old_entries_lock); + stm_spinlock_release(queue->old_entries_lock); if (entry != NULL) { /* successfully popped the old 'entry'. It remains in the diff --git a/c8/stm/smallmalloc.c b/c8/stm/smallmalloc.c --- a/c8/stm/smallmalloc.c +++ b/c8/stm/smallmalloc.c @@ -53,7 +53,7 @@ lock of pages.c to prevent any remapping from occurring under our feet. 
*/ - spinlock_acquire(gmfp_lock); + stm_spinlock_acquire(gmfp_lock); if (free_uniform_pages == NULL) { @@ -90,7 +90,7 @@ } } - spinlock_release(gmfp_lock); + stm_spinlock_release(gmfp_lock); return; out_of_memory: diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -98,7 +98,7 @@ void _stm_commit_transaction(void); void _stm_leave_noninevitable_transactional_zone(void); #define _stm_detach_inevitable_transaction(tl) do { \ - write_fence(); \ + stm_write_fence(); \ assert(_stm_detached_inevitable_from_thread == 0); \ if (stmcb_timing_event != NULL && tl->self_or_0_if_atomic != 0) \ {stmcb_timing_event(tl, STM_TRANSACTION_DETACH, NULL);} \ From pypy.commits at gmail.com Tue Apr 5 08:33:16 2016 From: pypy.commits at gmail.com (Raemi) Date: Tue, 05 Apr 2016 05:33:16 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: remove dependency on get_rpy_roots in vmprof and maintain a weaklist of Message-ID: <5703b08c.26b0c20a.59d69.141e@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r83519:5431b6525b3c Date: 2016-04-05 13:00 +0300 http://bitbucket.org/pypy/pypy/changeset/5431b6525b3c/ Log: remove dependency on get_rpy_roots in vmprof and maintain a weaklist of code objects instead diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -1,11 +1,12 @@ import sys, os from rpython.rlib.objectmodel import specialize, we_are_translated -from rpython.rlib import jit, rgc, rposix +from rpython.rlib import jit, rposix from rpython.rlib.rvmprof import cintf from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance from rpython.rtyper.lltypesystem import rffi, llmemory from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib.rweaklist import RWeakListMixin MAX_FUNC_NAME = 1023 @@ -35,7 +36,7 @@ self._cleanup_() self._code_unique_id = 4 self.cintf = cintf.setup() - + 
def _cleanup_(self): self.is_enabled = False @@ -55,6 +56,8 @@ self._code_unique_id = uid if self.is_enabled: self._write_code_registration(uid, full_name_func(code)) + else: + code._vmprof_weak_list.add_handle(code) def register_code_object_class(self, CodeClass, full_name_func): """NOT_RPYTHON @@ -80,14 +83,19 @@ CodeClass._vmprof_unique_id = 0 # default value: "unknown" self._code_classes.add(CodeClass) # - def try_cast_to_code(gcref): - return rgc.try_cast_gcref_to_instance(CodeClass, gcref) + class WeakCodeObjectList(RWeakListMixin): + def __init__(self): + self.initialize() + CodeClass._vmprof_weak_list = WeakCodeObjectList() # def gather_all_code_objs(): - all_code_objs = rgc.do_get_objects(try_cast_to_code) - for code in all_code_objs: - uid = code._vmprof_unique_id - if uid != 0: + all_code_wrefs = CodeClass._vmprof_weak_list.get_all_handles() + for wref in all_code_wrefs: + code = wref() + if code is not None: + uid = code._vmprof_unique_id + if uid != 0: + self._write_code_registration(uid, full_name_func(code)) self._write_code_registration(uid, full_name_func(code)) prev() # make a chained list of the gather() functions for all diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py b/rpython/rlib/rvmprof/test/test_rvmprof.py --- a/rpython/rlib/rvmprof/test/test_rvmprof.py +++ b/rpython/rlib/rvmprof/test/test_rvmprof.py @@ -80,7 +80,7 @@ return 0 assert f() == 0 - fn = compile(f, []) + fn = compile(f, [], gcpolicy="minimark") assert fn() == 0 From pypy.commits at gmail.com Tue Apr 5 08:33:18 2016 From: pypy.commits at gmail.com (Raemi) Date: Tue, 05 Apr 2016 05:33:18 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: import stmgc Message-ID: <5703b08e.c7371c0a.78bdb.14b9@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r83520:79247965065e Date: 2016-04-05 15:28 +0300 http://bitbucket.org/pypy/pypy/changeset/79247965065e/ Log: import stmgc diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- 
a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -8c9162341945 +b7f8a106095f diff --git a/rpython/translator/stm/src_stm/stm/atomic.h b/rpython/translator/stm/src_stm/stm/atomic.h --- a/rpython/translator/stm/src_stm/stm/atomic.h +++ b/rpython/translator/stm/src_stm/stm/atomic.h @@ -24,16 +24,16 @@ #if defined(__i386__) || defined(__amd64__) - static inline void spin_loop(void) { asm("pause" : : : "memory"); } - static inline void write_fence(void) { asm("" : : : "memory"); } + static inline void stm_spin_loop(void) { asm("pause" : : : "memory"); } + static inline void stm_write_fence(void) { asm("" : : : "memory"); } /*# define atomic_exchange(ptr, old, new) do { \ (old) = __sync_lock_test_and_set(ptr, new); \ } while (0)*/ #else - static inline void spin_loop(void) { asm("" : : : "memory"); } - static inline void write_fence(void) { __sync_synchronize(); } + static inline void stm_spin_loop(void) { asm("" : : : "memory"); } + static inline void stm_write_fence(void) { __sync_synchronize(); } /*# define atomic_exchange(ptr, old, new) do { \ (old) = *(ptr); \ @@ -42,19 +42,19 @@ #endif -static inline void _spinlock_acquire(uint8_t *plock) { +static inline void _stm_spinlock_acquire(uint8_t *plock) { retry: if (__builtin_expect(__sync_lock_test_and_set(plock, 1) != 0, 0)) { - spin_loop(); + stm_spin_loop(); goto retry; } } -static inline void _spinlock_release(uint8_t *plock) { +static inline void _stm_spinlock_release(uint8_t *plock) { assert(*plock == 1); __sync_lock_release(plock); } -#define spinlock_acquire(lock) _spinlock_acquire(&(lock)) -#define spinlock_release(lock) _spinlock_release(&(lock)) +#define stm_spinlock_acquire(lock) _stm_spinlock_acquire(&(lock)) +#define stm_spinlock_release(lock) _stm_spinlock_release(&(lock)) #endif /* _STM_ATOMIC_H */ diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ 
b/rpython/translator/stm/src_stm/stm/core.c @@ -1218,7 +1218,7 @@ /* but first, emit commit-event of this thread: */ timing_event(STM_SEGMENT->running_thread, STM_TRANSACTION_COMMIT); STM_SEGMENT->running_thread = NULL; - write_fence(); + stm_write_fence(); assert(_stm_detached_inevitable_from_thread == -1); _stm_detached_inevitable_from_thread = 0; } @@ -1540,7 +1540,7 @@ 0. We have to wait for this to happen bc. otherwise, eg. _stm_detach_inevitable_transaction is not safe to do yet */ while (_stm_detached_inevitable_from_thread == -1) - spin_loop(); + stm_spin_loop(); assert(_stm_detached_inevitable_from_thread == 0); soon_finished_or_inevitable_thread_segment(); diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -333,12 +333,12 @@ static inline void acquire_privatization_lock(int segnum) { - spinlock_acquire(get_priv_segment(segnum)->privatization_lock); + stm_spinlock_acquire(get_priv_segment(segnum)->privatization_lock); } static inline void release_privatization_lock(int segnum) { - spinlock_release(get_priv_segment(segnum)->privatization_lock); + stm_spinlock_release(get_priv_segment(segnum)->privatization_lock); } static inline bool all_privatization_locks_acquired(void) diff --git a/rpython/translator/stm/src_stm/stm/detach.c b/rpython/translator/stm/src_stm/stm/detach.c --- a/rpython/translator/stm/src_stm/stm/detach.c +++ b/rpython/translator/stm/src_stm/stm/detach.c @@ -107,7 +107,7 @@ is reset to a value different from -1 */ dprintf(("reattach_transaction: busy wait...\n")); while (_stm_detached_inevitable_from_thread == -1) - spin_loop(); + stm_spin_loop(); /* then retry */ goto restart; @@ -157,7 +157,7 @@ /* busy-loop: wait until _stm_detached_inevitable_from_thread is reset to a value different from -1 */ while (_stm_detached_inevitable_from_thread == -1) - spin_loop(); + stm_spin_loop(); goto restart; 
} if (!__sync_bool_compare_and_swap(&_stm_detached_inevitable_from_thread, @@ -209,7 +209,7 @@ /* busy-loop: wait until _stm_detached_inevitable_from_thread is reset to a value different from -1 */ while (_stm_detached_inevitable_from_thread == -1) - spin_loop(); + stm_spin_loop(); goto restart; } } diff --git a/rpython/translator/stm/src_stm/stm/finalizer.c b/rpython/translator/stm/src_stm/stm/finalizer.c --- a/rpython/translator/stm/src_stm/stm/finalizer.c +++ b/rpython/translator/stm/src_stm/stm/finalizer.c @@ -30,7 +30,7 @@ { /* move finalizer lists to g_finalizers for major collections */ while (__sync_lock_test_and_set(&g_finalizers.lock, 1) != 0) { - spin_loop(); + stm_spin_loop(); } if (STM_PSEGMENT->finalizers->run_finalizers != NULL) { @@ -515,7 +515,7 @@ while (__sync_lock_test_and_set(&g_finalizers.lock, 1) != 0) { /* somebody is adding more finalizers (_commit_finalizer()) */ - spin_loop(); + stm_spin_loop(); } struct finalizers_s copy = g_finalizers; assert(copy.running_next == NULL); diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -75,7 +75,7 @@ /* uncommon case: need to initialize some more pages */ - spinlock_acquire(lock_growth_large); + stm_spinlock_acquire(lock_growth_large); char *start = uninitialized_page_start; if (addr + size > start) { @@ -99,7 +99,7 @@ ((struct object_s*)addr)->stm_flags = 0; - spinlock_release(lock_growth_large); + stm_spinlock_release(lock_growth_large); return (stm_char*)(addr - stm_object_pages); } @@ -178,7 +178,7 @@ DEBUG_EXPECT_SEGFAULT(true); release_privatization_lock(STM_SEGMENT->segment_num); - write_fence(); /* make sure 'nobj' is fully initialized from + stm_write_fence(); /* make sure 'nobj' is fully initialized from all threads here */ return (object_t *)nobj; } diff --git a/rpython/translator/stm/src_stm/stm/hashtable.c 
b/rpython/translator/stm/src_stm/stm/hashtable.c --- a/rpython/translator/stm/src_stm/stm/hashtable.c +++ b/rpython/translator/stm/src_stm/stm/hashtable.c @@ -216,7 +216,7 @@ } biggertable->resize_counter = rc; - write_fence(); /* make sure that 'biggertable' is valid here, + stm_write_fence(); /* make sure that 'biggertable' is valid here, and make sure 'table->resize_counter' is updated ('table' must be immutable from now on). */ VOLATILE_HASHTABLE(hashtable)->table = biggertable; @@ -278,7 +278,7 @@ just now. In both cases, this thread must simply spin loop. */ if (IS_EVEN(rc)) { - spin_loop(); + stm_spin_loop(); goto restart; } /* in the other cases, we need to grab the RESIZING_LOCK. @@ -348,7 +348,7 @@ hashtable->additions++; } table->items[i] = entry; - write_fence(); /* make sure 'table->items' is written here */ + stm_write_fence(); /* make sure 'table->items' is written here */ VOLATILE_TABLE(table)->resize_counter = rc - 6; /* unlock */ stm_read((object_t*)entry); return entry; @@ -437,7 +437,7 @@ table = VOLATILE_HASHTABLE(hashtable)->table; rc = VOLATILE_TABLE(table)->resize_counter; if (IS_EVEN(rc)) { - spin_loop(); + stm_spin_loop(); goto restart; } diff --git a/rpython/translator/stm/src_stm/stm/largemalloc.c b/rpython/translator/stm/src_stm/stm/largemalloc.c --- a/rpython/translator/stm/src_stm/stm/largemalloc.c +++ b/rpython/translator/stm/src_stm/stm/largemalloc.c @@ -116,12 +116,12 @@ static void lm_lock(void) { - spinlock_acquire(lm.lock); + stm_spinlock_acquire(lm.lock); } static void lm_unlock(void) { - spinlock_release(lm.lock); + stm_spinlock_release(lm.lock); } diff --git a/rpython/translator/stm/src_stm/stm/queue.c b/rpython/translator/stm/src_stm/stm/queue.c --- a/rpython/translator/stm/src_stm/stm/queue.c +++ b/rpython/translator/stm/src_stm/stm/queue.c @@ -77,7 +77,7 @@ stm_queue_segment_t *seg = &queue->segs[i]; struct stm_priv_segment_info_s *pseg = get_priv_segment(i + 1); - spinlock_acquire(pseg->active_queues_lock); + 
stm_spinlock_acquire(pseg->active_queues_lock); if (seg->active) { assert(pseg->active_queues != NULL); @@ -91,7 +91,7 @@ assert(!seg->old_objects_popped); } - spinlock_release(pseg->active_queues_lock); + stm_spinlock_release(pseg->active_queues_lock); queue_free_entries(seg->added_in_this_transaction); queue_free_entries(seg->old_objects_popped); @@ -102,12 +102,12 @@ static inline void queue_lock_acquire(void) { int num = STM_SEGMENT->segment_num; - spinlock_acquire(get_priv_segment(num)->active_queues_lock); + stm_spinlock_acquire(get_priv_segment(num)->active_queues_lock); } static inline void queue_lock_release(void) { int num = STM_SEGMENT->segment_num; - spinlock_release(get_priv_segment(num)->active_queues_lock); + stm_spinlock_release(get_priv_segment(num)->active_queues_lock); } static void queue_activate(stm_queue_t *queue, stm_queue_segment_t *seg) @@ -133,7 +133,7 @@ #pragma push_macro("STM_SEGMENT") #undef STM_PSEGMENT #undef STM_SEGMENT - spinlock_acquire(pseg->active_queues_lock); + stm_spinlock_acquire(pseg->active_queues_lock); bool added_any_old_entries = false; bool finished_more_tasks = false; @@ -177,11 +177,11 @@ } dprintf(("items move to old_entries in queue %p\n", queue)); - spinlock_acquire(queue->old_entries_lock); + stm_spinlock_acquire(queue->old_entries_lock); old = queue->old_entries; tail->next = old; queue->old_entries = head; - spinlock_release(queue->old_entries_lock); + stm_spinlock_release(queue->old_entries_lock); added_any_old_entries = true; } @@ -196,7 +196,7 @@ tree_free(pseg->active_queues); pseg->active_queues = NULL; - spinlock_release(pseg->active_queues_lock); + stm_spinlock_release(pseg->active_queues_lock); if (added_any_old_entries) cond_broadcast(C_QUEUE_OLD_ENTRIES); @@ -267,11 +267,11 @@ can free and reuse this entry. Then the compare_and_swap succeeds, but the value written is outdated nonsense. 
*/ - spinlock_acquire(queue->old_entries_lock); + stm_spinlock_acquire(queue->old_entries_lock); entry = queue->old_entries; if (entry != NULL) queue->old_entries = entry->next; - spinlock_release(queue->old_entries_lock); + stm_spinlock_release(queue->old_entries_lock); if (entry != NULL) { /* successfully popped the old 'entry'. It remains in the diff --git a/rpython/translator/stm/src_stm/stm/smallmalloc.c b/rpython/translator/stm/src_stm/stm/smallmalloc.c --- a/rpython/translator/stm/src_stm/stm/smallmalloc.c +++ b/rpython/translator/stm/src_stm/stm/smallmalloc.c @@ -53,7 +53,7 @@ lock of pages.c to prevent any remapping from occurring under our feet. */ - spinlock_acquire(gmfp_lock); + stm_spinlock_acquire(gmfp_lock); if (free_uniform_pages == NULL) { @@ -90,7 +90,7 @@ } } - spinlock_release(gmfp_lock); + stm_spinlock_release(gmfp_lock); return; out_of_memory: diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -98,7 +98,7 @@ void _stm_commit_transaction(void); void _stm_leave_noninevitable_transactional_zone(void); #define _stm_detach_inevitable_transaction(tl) do { \ - write_fence(); \ + stm_write_fence(); \ assert(_stm_detached_inevitable_from_thread == 0); \ if (stmcb_timing_event != NULL && tl->self_or_0_if_atomic != 0) \ {stmcb_timing_event(tl, STM_TRANSACTION_DETACH, NULL);} \ From pypy.commits at gmail.com Tue Apr 5 08:33:20 2016 From: pypy.commits at gmail.com (Raemi) Date: Tue, 05 Apr 2016 05:33:20 -0700 (PDT) Subject: [pypy-commit] pypy default: typo Message-ID: <5703b090.41d91c0a.75c8b.20f2@mx.google.com> Author: Remi Meier Branch: Changeset: r83521:9da0c82687ed Date: 2016-04-05 15:32 +0300 http://bitbucket.org/pypy/pypy/changeset/9da0c82687ed/ Log: typo diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -96,7 
+96,6 @@ uid = code._vmprof_unique_id if uid != 0: self._write_code_registration(uid, full_name_func(code)) - self._write_code_registration(uid, full_name_func(code)) prev() # make a chained list of the gather() functions for all # the types of code objects From pypy.commits at gmail.com Tue Apr 5 08:35:03 2016 From: pypy.commits at gmail.com (Raemi) Date: Tue, 05 Apr 2016 05:35:03 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: enable vmprof and nearly pass the tests Message-ID: <5703b0f7.455ec20a.94e31.ffffbec1@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r83522:61b4b82f90cb Date: 2016-04-05 15:34 +0300 http://bitbucket.org/pypy/pypy/changeset/61b4b82f90cb/ Log: enable vmprof and nearly pass the tests diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -228,9 +228,6 @@ config.translation.stm = True config.translation.thread = True config.objspace.usemodules.pypystm = True - # for now, disable _vmprof: the JIT's stm parts are not adapted - # to track the stack depth - config.objspace.usemodules._vmprof = False # we don't support rlib.rawrefcount for our GC, so we need # to disable cpyext... 
config.objspace.usemodules.cpyext = False diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -96,7 +96,6 @@ uid = code._vmprof_unique_id if uid != 0: self._write_code_registration(uid, full_name_func(code)) - self._write_code_registration(uid, full_name_func(code)) prev() # make a chained list of the gather() functions for all # the types of code objects diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py b/rpython/rlib/rvmprof/test/test_rvmprof.py --- a/rpython/rlib/rvmprof/test/test_rvmprof.py +++ b/rpython/rlib/rvmprof/test/test_rvmprof.py @@ -6,7 +6,11 @@ from rpython.rlib.nonconst import NonConstant -def test_vmprof_execute_code_1(): +STM_TRANSLATE_KWARGS = {"gcpolicy": "stmgc", "thread": True, "stm": True} + + + at py.test.mark.parametrize("stmparams", [{}, STM_TRANSLATE_KWARGS]) +def test_vmprof_execute_code_1(stmparams): class MyCode: pass @@ -26,11 +30,12 @@ return 0 assert f() == 0 - fn = compile(f, []) + fn = compile(f, [], **stmparams) assert fn() == 0 -def test_vmprof_execute_code_2(): + at py.test.mark.parametrize("stmparams", [{}, STM_TRANSLATE_KWARGS]) +def test_vmprof_execute_code_2(stmparams): class MyCode: pass @@ -54,11 +59,12 @@ return 0 assert f() == 0 - fn = compile(f, []) + fn = compile(f, [], **stmparams) assert fn() == 0 -def test_register_code(): + at py.test.mark.parametrize("stmparams", [{"gcpolicy": "minimark"}, STM_TRANSLATE_KWARGS]) +def test_register_code(stmparams): class MyCode: pass @@ -80,11 +86,12 @@ return 0 assert f() == 0 - fn = compile(f, [], gcpolicy="minimark") + fn = compile(f, [], **stmparams) assert fn() == 0 -def test_enable(): + at py.test.mark.parametrize("stmparams", [{"gcpolicy": "minimark"}, STM_TRANSLATE_KWARGS]) +def test_enable(stmparams): class MyCode: pass @@ -136,7 +143,7 @@ assert f() == 0 assert os.path.exists(tmpfilename) - fn = compile(f, [], gcpolicy="minimark") + fn = compile(f, [], 
**stmparams) assert fn() == 0 try: import vmprof diff --git a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -274,11 +274,11 @@ gc_startup_code = RefcountingGcPolicy.gc_startup_code.im_func - def gettransformer(self): + def gettransformer(self, translator): if self.db.translator.config.translation.stm: from rpython.memory.gctransform import nogcstm - return nogcstm.NoneSTMGCTransformer(self.db.translator) - return BoehmGcPolicy.gettransformer(self) + return nogcstm.NoneSTMGCTransformer(translator) + return BoehmGcPolicy.gettransformer(self, translator) def compilation_info(self): eci = BasicGcPolicy.compilation_info(self) From pypy.commits at gmail.com Tue Apr 5 10:01:25 2016 From: pypy.commits at gmail.com (Raemi) Date: Tue, 05 Apr 2016 07:01:25 -0700 (PDT) Subject: [pypy-commit] pypy default: fix test Message-ID: <5703c535.6672c20a.22f0f.7f5a@mx.google.com> Author: Remi Meier Branch: Changeset: r83523:82e8cf34e4ab Date: 2016-04-05 17:00 +0300 http://bitbucket.org/pypy/pypy/changeset/82e8cf34e4ab/ Log: fix test diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py @@ -6,6 +6,7 @@ from rpython.rlib import rthread from rpython.translator.translator import TranslationContext from rpython.jit.backend.detect_cpu import getcpuclass +from rpython.rlib.rweaklist import RWeakListMixin class CompiledVmprofTest(CCompiledMixin): CPUClass = getcpuclass() @@ -21,6 +22,7 @@ class MyCode: _vmprof_unique_id = 0 + _vmprof_weak_list = RWeakListMixin() ; _vmprof_weak_list.initialize() def __init__(self, name): self.name = name From pypy.commits at gmail.com Tue Apr 5 10:23:48 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 05 Apr 2016 07:23:48 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: Don't attempt 
to define posix-only constants on Windows Message-ID: <5703ca74.d4df1c0a.1448d.4a65@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83524:efeca6950e2e Date: 2016-04-05 15:22 +0100 http://bitbucket.org/pypy/pypy/changeset/efeca6950e2e/ Log: Don't attempt to define posix-only constants on Windows diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1752,17 +1752,6 @@ 'unistd.h', 'fcntl.h'], ) - AT_FDCWD = rffi_platform.DefinedConstantInteger('AT_FDCWD') - AT_SYMLINK_NOFOLLOW = rffi_platform.DefinedConstantInteger('AT_SYMLINK_NOFOLLOW') - AT_EACCESS = rffi_platform.DefinedConstantInteger('AT_EACCESS') - AT_REMOVEDIR = rffi_platform.DefinedConstantInteger('AT_REMOVEDIR') - AT_EMPTY_PATH = rffi_platform.DefinedConstantInteger('AT_EMPTY_PATH') - UTIME_NOW = rffi_platform.DefinedConstantInteger('UTIME_NOW') - UTIME_OMIT = rffi_platform.DefinedConstantInteger('UTIME_OMIT') - TIMESPEC = rffi_platform.Struct('struct timespec', [ - ('tv_sec', rffi.TIME_T), - ('tv_nsec', rffi.LONG)]) - for _name in """faccessat fchdir fchmod fchmodat fchown fchownat fexecve fdopendir fpathconf fstat fstatat fstatvfs ftruncate futimens futimes futimesat linkat chflags lchflags lchmod lchown @@ -1771,7 +1760,28 @@ locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name) cConfig = rffi_platform.configure(CConfig) globals().update(cConfig) -TIMESPEC2P = rffi.CArrayPtr(TIMESPEC) + +if not _WIN32: + class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes=['sys/stat.h', + 'unistd.h', + 'fcntl.h'], + ) + AT_FDCWD = rffi_platform.DefinedConstantInteger('AT_FDCWD') + AT_SYMLINK_NOFOLLOW = rffi_platform.DefinedConstantInteger('AT_SYMLINK_NOFOLLOW') + AT_EACCESS = rffi_platform.DefinedConstantInteger('AT_EACCESS') + AT_REMOVEDIR = rffi_platform.DefinedConstantInteger('AT_REMOVEDIR') + AT_EMPTY_PATH = rffi_platform.DefinedConstantInteger('AT_EMPTY_PATH') + UTIME_NOW = 
rffi_platform.DefinedConstantInteger('UTIME_NOW') + UTIME_OMIT = rffi_platform.DefinedConstantInteger('UTIME_OMIT') + TIMESPEC = rffi_platform.Struct('struct timespec', [ + ('tv_sec', rffi.TIME_T), + ('tv_nsec', rffi.LONG)]) + + cConfig = rffi_platform.configure(CConfig) + globals().update(cConfig) + TIMESPEC2P = rffi.CArrayPtr(TIMESPEC) if HAVE_FACCESSAT: c_faccessat = external('faccessat', From pypy.commits at gmail.com Tue Apr 5 11:00:31 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 05 Apr 2016 08:00:31 -0700 (PDT) Subject: [pypy-commit] pypy default: fix a strange case Message-ID: <5703d30f.865a1c0a.33b0c.4b89@mx.google.com> Author: fijal Branch: Changeset: r83525:0c9b0eb268bd Date: 2016-04-05 17:59 +0300 http://bitbucket.org/pypy/pypy/changeset/0c9b0eb268bd/ Log: fix a strange case diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -690,7 +690,7 @@ self.trace = Trace(inpargs, metainterp_sd) self.inputargs = inpargs - if self._cache: + if self._cache is not None: # hack to record the ops *after* we know our inputargs for (opnum, argboxes, op, descr) in self._cache: pos = self.trace.record_op(opnum, argboxes, descr) From pypy.commits at gmail.com Tue Apr 5 11:02:24 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 05 Apr 2016 08:02:24 -0700 (PDT) Subject: [pypy-commit] pypy default: fix strange annotation errors for small targets Message-ID: <5703d380.53371c0a.b7c23.763d@mx.google.com> Author: fijal Branch: Changeset: r83526:987ea970c2b7 Date: 2016-04-05 18:01 +0300 http://bitbucket.org/pypy/pypy/changeset/987ea970c2b7/ Log: fix strange annotation errors for small targets diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -77,6 +77,7 @@ for c in s: buf.append(c) buf.append(' ') +rpython_print_item._annenforceargs_ = (str,) 
def rpython_print_newline(): buf = stdoutbuffer.linebuf From pypy.commits at gmail.com Tue Apr 5 14:15:12 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 05 Apr 2016 11:15:12 -0700 (PDT) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <570400b0.e5ecc20a.ac7e8.ffffd233@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83527:e186dd71a5e7 Date: 2016-04-05 19:13 +0100 http://bitbucket.org/pypy/pypy/changeset/e186dd71a5e7/ Log: hg merge default diff too long, truncating to 2000 out of 10146 lines diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -67,7 +67,8 @@ subvalue = subfield.ctype fields[subname] = Field(subname, relpos, subvalue._sizeofinstances(), - subvalue, i, is_bitfield) + subvalue, i, is_bitfield, + inside_anon_field=fields[name]) else: resnames.append(name) names = resnames @@ -77,13 +78,15 @@ class Field(object): - def __init__(self, name, offset, size, ctype, num, is_bitfield): + def __init__(self, name, offset, size, ctype, num, is_bitfield, + inside_anon_field=None): self.__dict__['name'] = name self.__dict__['offset'] = offset self.__dict__['size'] = size self.__dict__['ctype'] = ctype self.__dict__['num'] = num self.__dict__['is_bitfield'] = is_bitfield + self.__dict__['inside_anon_field'] = inside_anon_field def __setattr__(self, name, value): raise AttributeError(name) @@ -95,6 +98,8 @@ def __get__(self, obj, cls=None): if obj is None: return self + if self.inside_anon_field is not None: + return getattr(self.inside_anon_field.__get__(obj), self.name) if self.is_bitfield: # bitfield member, use direct access return obj._buffer.__getattr__(self.name) @@ -105,6 +110,9 @@ return fieldtype._CData_output(suba, obj, offset) def __set__(self, obj, value): + if self.inside_anon_field is not None: + setattr(self.inside_anon_field.__get__(obj), self.name, value) + return fieldtype = self.ctype cobj = fieldtype.from_param(value) 
key = keepalive_key(self.num) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -48,7 +48,6 @@ except detect_cpu.ProcessorAutodetectError: pass - translation_modules = default_modules.copy() translation_modules.update([ "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -18,6 +18,7 @@ - ``bytebuffer(length)``: return a new read-write buffer of the given length. It works like a simplified array of characters (actually, depending on the configuration the ``array`` module internally uses this). + - ``attach_gdb()``: start a GDB at the interpreter-level (or a PDB before translation). Transparent Proxy Functionality @@ -37,4 +38,3 @@ -------------------------------------------------------- - ``isfake(obj)``: returns True if ``obj`` is faked. - - ``interp_pdb()``: start a pdb at interpreter-level. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -23,3 +23,15 @@ Implement yet another strange numpy indexing compatibility; indexing by a scalar returns a scalar + +.. branch: fix_transpose_for_list_v3 + +Allow arguments to transpose to be sequences + +.. branch: jit-leaner-frontend + +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation +of traces + +.. branch: win32-lib-name + diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -247,8 +247,9 @@ "when --shared is on (it is by default). 
" "See issue #1971.") if sys.platform == 'win32': - config.translation.libname = '..\\..\\libs\\python27.lib' - thisdir.join('..', '..', 'libs').ensure(dir=1) + libdir = thisdir.join('..', '..', 'libs') + libdir.ensure(dir=1) + config.translation.libname = str(libdir.join('python27.lib')) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -316,13 +316,7 @@ return tb def set_traceback(self, traceback): - """Set the current traceback. It should either be a traceback - pointing to some already-escaped frame, or a traceback for the - current frame. To support the latter case we do not mark the - frame as escaped. The idea is that it will be marked as escaping - only if the exception really propagates out of this frame, by - executioncontext.leave() being called with got_exception=True. - """ + """Set the current traceback.""" self._application_traceback = traceback def remove_traceback_module_frames(self, module_name): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -88,6 +88,7 @@ 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', 'decode_long' : 'interp_magic.decode_long', + '_promote' : 'interp_magic._promote', 'normalize_exc' : 'interp_magic.normalize_exc', 'StdErrPrinter' : 'interp_stderrprinter.W_StdErrPrinter', } diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -148,6 +148,26 @@ raise oefmt(space.w_ValueError, "invalid byteorder argument") return space.newlong_from_rbigint(result) +def _promote(space, w_obj): + """ Promote the first argument of the function and return it. 
Promote is by + value for ints, floats, strs, unicodes (but not subclasses thereof) and by + reference otherwise. (Unicodes not supported right now.) + + This function is experimental!""" + from rpython.rlib import jit + if space.is_w(space.type(w_obj), space.w_int): + jit.promote(space.int_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_float): + jit.promote(space.float_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_str): + jit.promote_string(space.str_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_unicode): + raise OperationError(space.w_TypeError, space.wrap( + "promoting unicode unsupported")) + else: + jit.promote(w_obj) + return w_obj + @unwrap_spec(w_value=WrappedDefault(None), w_tb=WrappedDefault(None)) def normalize_exc(space, w_type, w_value=None, w_tb=None): operr = OperationError(w_type, w_value, w_tb) diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -51,3 +51,16 @@ assert decode_long(b'\x00\x80', 'little', False) == 32768 assert decode_long(b'\x00\x80', 'little', True) == -32768 raises(ValueError, decode_long, '', 'foo') + + def test_promote(self): + from __pypy__ import _promote + assert _promote(1) == 1 + assert _promote(1.1) == 1.1 + assert _promote("abc") == "abc" + assert _promote(u"abc") == u"abc" + l = [] + assert _promote(l) is l + class A(object): + pass + a = A() + assert _promote(a) is a diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h @@ -10,6 +10,7 @@ #define _CJKCODECS_H_ #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" /* a unicode "undefined" codepoint */ diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h 
b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h new file mode 100644 --- /dev/null +++ b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h @@ -0,0 +1,9 @@ + +/* this is only included from the .c files in this directory: rename + these pypymbc-prefixed names to locally define the CPython names */ +typedef pypymbc_ssize_t Py_ssize_t; +#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +#define Py_UNICODE_SIZE pypymbc_UNICODE_SIZE +typedef pypymbc_wchar_t Py_UNICODE; +typedef pypymbc_ucs4_t ucs4_t; +typedef pypymbc_ucs2_t ucs2_t, DBCHAR; diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c @@ -1,6 +1,7 @@ #include #include #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec) diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h @@ -9,31 +9,28 @@ #include #ifdef _WIN64 -typedef __int64 ssize_t +typedef __int64 pypymbc_ssize_t #elif defined(_WIN32) -typedef int ssize_t; +typedef int pypymbc_ssize_t; #else #include -#endif - -#ifndef Py_UNICODE_SIZE -#ifdef _WIN32 -#define Py_UNICODE_SIZE 2 -#else -#define Py_UNICODE_SIZE 4 -#endif -typedef wchar_t Py_UNICODE; -typedef ssize_t Py_ssize_t; -#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +typedef ssize_t pypymbc_ssize_t; #endif #ifdef _WIN32 -typedef unsigned int ucs4_t; -typedef unsigned short ucs2_t, DBCHAR; +#define pypymbc_UNICODE_SIZE 2 +#else +#define pypymbc_UNICODE_SIZE 4 +#endif +typedef wchar_t pypymbc_wchar_t; + +#ifdef _WIN32 +typedef unsigned int pypymbc_ucs4_t; +typedef 
unsigned short pypymbc_ucs2_t; #else #include -typedef uint32_t ucs4_t; -typedef uint16_t ucs2_t, DBCHAR; +typedef uint32_t pypymbc_ucs4_t; +typedef uint16_t pypymbc_ucs2_t; #endif @@ -42,28 +39,28 @@ void *p; int i; unsigned char c[8]; - ucs2_t u2[4]; - ucs4_t u4[2]; + pypymbc_ucs2_t u2[4]; + pypymbc_ucs4_t u4[2]; } MultibyteCodec_State; typedef int (*mbcodec_init)(const void *config); -typedef Py_ssize_t (*mbencode_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencode_func)(MultibyteCodec_State *state, const void *config, - const Py_UNICODE **inbuf, Py_ssize_t inleft, - unsigned char **outbuf, Py_ssize_t outleft, + const pypymbc_wchar_t **inbuf, pypymbc_ssize_t inleft, + unsigned char **outbuf, pypymbc_ssize_t outleft, int flags); typedef int (*mbencodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, const void *config, - unsigned char **outbuf, Py_ssize_t outleft); -typedef Py_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, + unsigned char **outbuf, pypymbc_ssize_t outleft); +typedef pypymbc_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, const void *config, - const unsigned char **inbuf, Py_ssize_t inleft, - Py_UNICODE **outbuf, Py_ssize_t outleft); + const unsigned char **inbuf, pypymbc_ssize_t inleft, + pypymbc_wchar_t **outbuf, pypymbc_ssize_t outleft); typedef int (*mbdecodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, const void *config); typedef struct MultibyteCodec_s { @@ -94,59 +91,59 @@ const MultibyteCodec *codec; MultibyteCodec_State state; const unsigned char *inbuf_start, *inbuf, *inbuf_end; - Py_UNICODE *outbuf_start, *outbuf, *outbuf_end; + pypymbc_wchar_t *outbuf_start, *outbuf, *outbuf_end; }; 
RPY_EXTERN struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, - char *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, + char *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_dec_free(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_UNICODE *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); +pypymbc_wchar_t *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, - Py_UNICODE *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, + pypymbc_wchar_t *, pypymbc_ssize_t, pypymbc_ssize_t); struct pypy_cjk_enc_s { const MultibyteCodec *codec; MultibyteCodec_State state; - const Py_UNICODE *inbuf_start, *inbuf, *inbuf_end; + const pypymbc_wchar_t *inbuf_start, *inbuf, *inbuf_end; unsigned char *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_enc_s *pypy_cjk_enc_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, - Py_UNICODE *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, + pypymbc_wchar_t *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_enc_free(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, Py_ssize_t); +pypymbc_ssize_t 
pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, pypymbc_ssize_t); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); RPY_EXTERN char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, - char *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, + char *, pypymbc_ssize_t, pypymbc_ssize_t); RPY_EXTERN const MultibyteCodec *pypy_cjk_enc_getcodec(struct pypy_cjk_enc_s *); @@ -191,5 +188,7 @@ DEFINE_CODEC(big5) DEFINE_CODEC(cp950) +#undef DEFINE_CODEC + #endif diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -90,8 +90,10 @@ self.raises(space, api, IndexError, api.PySequence_SetItem, l, 3, w_value) + t = api.PyTuple_New(1) + api.PyTuple_SetItem(t, 0, l) self.raises(space, api, TypeError, api.PySequence_SetItem, - api.PyTuple_New(1), 0, w_value) + t, 0, w_value) self.raises(space, api, TypeError, api.PySequence_SetItem, space.newdict(), 0, w_value) diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -5,6 +5,7 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from 
rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import FatalError class TestTupleObject(BaseApiTest): @@ -18,29 +19,44 @@ #assert api.PyTuple_GET_SIZE(atuple) == 3 --- now a C macro raises(TypeError, api.PyTuple_Size(space.newlist([]))) api.PyErr_Clear() - + + def test_tuple_realize_refuses_nulls(self, space, api): + py_tuple = api.PyTuple_New(1) + py.test.raises(FatalError, from_ref, space, py_tuple) + def test_tuple_resize(self, space, api): w_42 = space.wrap(42) + w_43 = space.wrap(43) + w_44 = space.wrap(44) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') py_tuple = api.PyTuple_New(3) # inside py_tuple is an array of "PyObject *" items which each hold # a reference rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) ar[0] = py_tuple api._PyTuple_Resize(ar, 2) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 2 assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + assert space.int_w(space.getitem(w_tuple, space.wrap(1))) == 43 api.Py_DecRef(ar[0]) py_tuple = api.PyTuple_New(3) rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[2] = make_ref(space, w_44) ar[0] = py_tuple api._PyTuple_Resize(ar, 10) + assert api.PyTuple_Size(ar[0]) == 10 + for i in range(3, 10): + rffi.cast(PyTupleObject, py_tuple).c_ob_item[i] = make_ref( + space, space.wrap(42 + i)) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 10 - assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + for i in range(10): + assert space.int_w(space.getitem(w_tuple, space.wrap(i))) == 42 + i api.Py_DecRef(ar[0]) lltype.free(ar, flavor='raw') diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ 
b/pypy/module/cpyext/tupleobject.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import fatalerror_notb from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL, build_type_checkers, PyObjectFields, cpython_struct, bootstrap_function) @@ -91,14 +92,22 @@ def tuple_realize(space, py_obj): """ Creates the tuple in the interpreter. The PyTupleObject must not - be modified after this call. + be modified after this call. We check that it does not contain + any NULLs at this point (which would correspond to half-broken + W_TupleObjects). """ py_tup = rffi.cast(PyTupleObject, py_obj) l = py_tup.c_ob_size p = py_tup.c_ob_item items_w = [None] * l for i in range(l): - items_w[i] = from_ref(space, p[i]) + w_item = from_ref(space, p[i]) + if w_item is None: + fatalerror_notb( + "Fatal error in cpyext, CPython compatibility layer: " + "converting a PyTupleObject into a W_TupleObject, " + "but found NULLs as items") + items_w[i] = w_item w_obj = space.newtuple(items_w) track_reference(space, py_obj, w_obj) return w_obj diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -502,29 +502,34 @@ return W_NDimArray(self.implementation.transpose(self, axes)) def descr_transpose(self, space, args_w): - if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple): - args_w = space.fixedview(args_w[0]) - if (len(args_w) == 0 or - len(args_w) == 1 and space.is_none(args_w[0])): + if len(args_w) == 0 or len(args_w) == 1 and space.is_none(args_w[0]): return self.descr_get_transpose(space) else: - if len(args_w) != self.ndims(): - raise oefmt(space.w_ValueError, "axes don't match array") - axes = [] - axes_seen = [False] * self.ndims() - for w_arg in args_w: - try: - axis = support.index_w(space, w_arg) - except OperationError: - raise oefmt(space.w_TypeError, 
"an integer is required") - if axis < 0 or axis >= self.ndims(): - raise oefmt(space.w_ValueError, "invalid axis for this array") - if axes_seen[axis] is True: - raise oefmt(space.w_ValueError, "repeated axis in transpose") - axes.append(axis) - axes_seen[axis] = True - return self.descr_get_transpose(space, axes) + if len(args_w) > 1: + axes = args_w + else: # Iterable in the only argument (len(arg_w) == 1 and arg_w[0] is not None) + axes = space.fixedview(args_w[0]) + axes = self._checked_axes(axes, space) + return self.descr_get_transpose(space, axes) + + def _checked_axes(self, axes_raw, space): + if len(axes_raw) != self.ndims(): + raise oefmt(space.w_ValueError, "axes don't match array") + axes = [] + axes_seen = [False] * self.ndims() + for elem in axes_raw: + try: + axis = support.index_w(space, elem) + except OperationError: + raise oefmt(space.w_TypeError, "an integer is required") + if axis < 0 or axis >= self.ndims(): + raise oefmt(space.w_ValueError, "invalid axis for this array") + if axes_seen[axis] is True: + raise oefmt(space.w_ValueError, "repeated axis in transpose") + axes.append(axis) + axes_seen[axis] = True + return axes @unwrap_spec(axis1=int, axis2=int) def descr_swapaxes(self, space, axis1, axis2): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2951,6 +2951,36 @@ assert (a.transpose() == b).all() assert (a.transpose(None) == b).all() + def test_transpose_arg_tuple(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose((1, 2, 0)) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_list(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose([1, 2, 0]) + 
+ assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_array(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose(np.array([1, 2, 0])) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + def test_transpose_error(self): import numpy as np a = np.arange(24).reshape(2, 3, 4) @@ -2959,6 +2989,11 @@ raises(ValueError, a.transpose, 1, 0, 1) raises(TypeError, a.transpose, 1, 0, '2') + def test_transpose_unexpected_argument(self): + import numpy as np + a = np.array([[1, 2], [3, 4], [5, 6]]) + raises(TypeError, 'a.transpose(axes=(1,2,0))') + def test_flatiter(self): from numpy import array, flatiter, arange, zeros a = array([[10, 30], [40, 60]]) diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -23,51 +23,42 @@ else: return _resolve_attr_chain(chain, obj, idx + 1) - -class _simple_attrgetter(object): - def __init__(self, attr): - self._attr = attr +class attrgetter(object): + def __init__(self, attr, *attrs): + if ( + not isinstance(attr, str) or + not all(isinstance(a, str) for a in attrs) + ): + raise TypeError("attribute name must be a string, not %r" % + type(attr).__name__) + elif attrs: + self._multi_attrs = [ + a.split(".") for a in [attr] + list(attrs) + ] + self._call = self._multi_attrgetter + elif "." 
not in attr: + self._simple_attr = attr + self._call = self._simple_attrgetter + else: + self._single_attr = attr.split(".") + self._call = self._single_attrgetter def __call__(self, obj): - return getattr(obj, self._attr) + return self._call(obj) + def _simple_attrgetter(self, obj): + return getattr(obj, self._simple_attr) -class _single_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs + def _single_attrgetter(self, obj): + return _resolve_attr_chain(self._single_attr, obj) - def __call__(self, obj): - return _resolve_attr_chain(self._attrs, obj) - - -class _multi_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs - - def __call__(self, obj): + def _multi_attrgetter(self, obj): return tuple([ _resolve_attr_chain(attrs, obj) - for attrs in self._attrs + for attrs in self._multi_attrs ]) -def attrgetter(attr, *attrs): - if ( - not isinstance(attr, str) or - not all(isinstance(a, str) for a in attrs) - ): - raise TypeError("attribute name must be a string, not %r" % - type(attr).__name__) - if attrs: - return _multi_attrgetter([ - a.split(".") for a in [attr] + list(attrs) - ]) - elif "." 
not in attr: - return _simple_attrgetter(attr) - else: - return _single_attrgetter(attr.split(".")) - - class itemgetter(object): def __init__(self, item, *items): self._single = not bool(items) diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -50,7 +50,13 @@ a.name = "hello" a.child = A() a.child.name = "world" + a.child.foo = "bar" assert attrgetter("child.name")(a) == "world" + assert attrgetter("child.name", "child.foo")(a) == ("world", "bar") + + def test_attrgetter_type(self): + from operator import attrgetter + assert type(attrgetter("child.name")) is attrgetter def test_concat(self): class Seq1: diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py b/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py @@ -57,3 +57,32 @@ assert Y.y.offset == sizeof(c_int) * 2 assert Y._names_ == ['x', 'a', 'b', 'y'] + + def test_anonymous_fields_on_instance(self): + # this is about the *instance-level* access of anonymous fields, + # which you'd guess is the most common, but used not to work + # (issue #2230) + + class B(Structure): + _fields_ = [("x", c_int), ("y", c_int), ("z", c_int)] + class A(Structure): + _anonymous_ = ["b"] + _fields_ = [("b", B)] + + a = A() + a.x = 5 + assert a.x == 5 + assert a.b.x == 5 + a.b.x += 1 + assert a.x == 6 + + class C(Structure): + _anonymous_ = ["a"] + _fields_ = [("v", c_int), ("a", A)] + + c = C() + c.v = 3 + c.y = -8 + assert c.v == 3 + assert c.y == c.a.y == c.a.b.y == -8 + assert not hasattr(c, 'b') diff --git a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py b/pypy/module/test_lib_pypy/pyrepl/infrastructure.py --- a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py +++ b/pypy/module/test_lib_pypy/pyrepl/infrastructure.py @@ -18,6 +18,9 @@ # CONNECTION 
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import print_function +from contextlib import contextmanager +import os + from pyrepl.reader import Reader from pyrepl.console import Console, Event @@ -71,3 +74,14 @@ con = TestConsole(test_spec, verbose=True) reader = reader_class(con) reader.readline() + + + at contextmanager +def sane_term(): + """Ensure a TERM that supports clear""" + old_term, os.environ['TERM'] = os.environ.get('TERM'), 'xterm' + yield + if old_term is not None: + os.environ['TERM'] = old_term + else: + del os.environ['TERM'] diff --git a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py --- a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py @@ -18,7 +18,7 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from pyrepl.historical_reader import HistoricalReader -from .infrastructure import EA, BaseTestReader, read_spec +from .infrastructure import EA, BaseTestReader, sane_term, read_spec # this test case should contain as-verbatim-as-possible versions of # (applicable) bug reports @@ -46,7 +46,8 @@ read_spec(spec, HistoricalTestReader) - at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") + at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") def test_signal_failure(monkeypatch): import os import pty @@ -61,13 +62,14 @@ mfd, sfd = pty.openpty() try: - c = UnixConsole(sfd, sfd) - c.prepare() - c.restore() - monkeypatch.setattr(signal, 'signal', failing_signal) - c.prepare() - monkeypatch.setattr(signal, 'signal', really_failing_signal) - c.restore() + with sane_term(): + c = UnixConsole(sfd, sfd) + c.prepare() + c.restore() + monkeypatch.setattr(signal, 'signal', failing_signal) + c.prepare() + monkeypatch.setattr(signal, 'signal', really_failing_signal) + c.restore() finally: os.close(mfd) os.close(sfd) diff --git 
a/pypy/module/test_lib_pypy/pyrepl/test_readline.py b/pypy/module/test_lib_pypy/pyrepl/test_readline.py --- a/pypy/module/test_lib_pypy/pyrepl/test_readline.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_readline.py @@ -1,7 +1,10 @@ import pytest +from .infrastructure import sane_term - at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") + + at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") def test_raw_input(): import os import pty @@ -11,7 +14,8 @@ readline_wrapper = _ReadlineWrapper(slave, slave) os.write(master, b'input\n') - result = readline_wrapper.get_reader().readline() + with sane_term(): + result = readline_wrapper.get_reader().readline() #result = readline_wrapper.raw_input('prompt:') assert result == 'input' # A bytes string on python2, a unicode string on python3. diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py --- a/pypy/objspace/std/objectobject.py +++ b/pypy/objspace/std/objectobject.py @@ -110,7 +110,7 @@ def descr__init__(space, w_obj, __args__): # don't allow arguments unless __new__ is overridden w_type = space.type(w_obj) - w_parent_new, _ = w_type.lookup_where('__new__') + w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__') if w_parent_new is space.w_object: try: __args__.fixedunpack(0) diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -288,9 +288,11 @@ RPyListPrinter.recursive = True try: itemlist = [] - for i in range(length): + for i in range(min(length, MAX_DISPLAY_LENGTH)): item = items[i] itemlist.append(str(item)) # may recurse here + if length > MAX_DISPLAY_LENGTH: + itemlist.append("...") str_items = ', '.join(itemlist) finally: RPyListPrinter.recursive = False diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ # hypothesis is used for test generation on untranslated jit tests 
hypothesis -enum>=0.4.6 # is a dependency, but old pip does not pick it up enum34>=1.1.2 diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -126,6 +126,9 @@ ChoiceOption("jit_profiler", "integrate profiler support into the JIT", ["off", "oprofile"], default="off"), + ChoiceOption("jit_opencoder_model", "the model limits the maximal length" + " of traces. Use big if you want to go bigger than " + "the default", ["big", "normal"], default="normal"), BoolOption("check_str_without_nul", "Forbid NUL chars in strings in some external function calls", default=False, cmdline=None), diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -77,6 +77,7 @@ for c in s: buf.append(c) buf.append(' ') +rpython_print_item._annenforceargs_ = (str,) def rpython_print_newline(): buf = stdoutbuffer.linebuf diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -939,9 +939,9 @@ op = operations[i] self.mc.mark_op(op) opnum = op.getopnum() - if op.has_no_side_effect() and op not in regalloc.longevity: + if rop.has_no_side_effect(opnum) and op not in regalloc.longevity: regalloc.possibly_free_vars_for_op(op) - elif not we_are_translated() and op.getopnum() == -127: + elif not we_are_translated() and op.getopnum() == rop.FORCE_SPILL: regalloc.prepare_force_spill(op, fcond) else: arglocs = regalloc_operations[opnum](regalloc, op, fcond) @@ -949,7 +949,7 @@ fcond = asm_operations[opnum](self, op, arglocs, regalloc, fcond) assert fcond is not None - if op.is_guard(): + if rop.is_guard(opnum): regalloc.possibly_free_vars(op.getfailargs()) if op.type != 'v': regalloc.possibly_free_var(op) diff --git a/rpython/jit/backend/arm/detect.py 
b/rpython/jit/backend/arm/detect.py --- a/rpython/jit/backend/arm/detect.py +++ b/rpython/jit/backend/arm/detect.py @@ -63,3 +63,44 @@ "falling back to", "ARMv%d" % n) debug_stop("jit-backend-arch") return n + + +# Once we can rely on the availability of glibc >= 2.16, replace this with: +# from rpython.rtyper.lltypesystem import lltype, rffi +# getauxval = rffi.llexternal("getauxval", [lltype.Unsigned], lltype.Unsigned) +def getauxval(type_, filename='/proc/self/auxv'): + fd = os.open(filename, os.O_RDONLY, 0644) + + buf_size = 2048 + struct_size = 8 # 2x uint32 + try: + buf = os.read(fd, buf_size) + finally: + os.close(fd) + + # decode chunks of 8 bytes (a_type, a_val), and + # return the a_val whose a_type corresponds to type_, + # or zero if not found. + i = 0 + while i <= buf_size - struct_size: + # We only support little-endian ARM + a_type = (ord(buf[i]) | + (ord(buf[i+1]) << 8) | + (ord(buf[i+2]) << 16) | + (ord(buf[i+3]) << 24)) + a_val = (ord(buf[i+4]) | + (ord(buf[i+5]) << 8) | + (ord(buf[i+6]) << 16) | + (ord(buf[i+7]) << 24)) + i += struct_size + if a_type == type_: + return a_val + + return 0 + + +def detect_neon(): + AT_HWCAP = 16 + HWCAP_NEON = 1 << 12 + hwcap = getauxval(AT_HWCAP) + return bool(hwcap & HWCAP_NEON) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1092,8 +1092,8 @@ self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) return fcond - # the following five instructions are only ARMv7; - # regalloc.py won't call them at all on ARMv6 + # the following five instructions are only ARMv7 with NEON; + # regalloc.py won't call them at all, in other cases emit_opx_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') emit_opx_llong_sub = gen_emit_float_op('llong_sub', 'VSUB_i64') emit_opx_llong_and = gen_emit_float_op('llong_and', 'VAND_i64') diff --git a/rpython/jit/backend/arm/regalloc.py 
b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -530,7 +530,7 @@ EffectInfo.OS_LLONG_AND, EffectInfo.OS_LLONG_OR, EffectInfo.OS_LLONG_XOR): - if self.cpu.cpuinfo.arch_version >= 7: + if self.cpu.cpuinfo.neon: args = self._prepare_llong_binop_xx(op, fcond) self.perform_extra(op, args, fcond) return diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -7,13 +7,14 @@ from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.jit.backend.arm.detect import detect_hardfloat -from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.backend.arm.detect import detect_arch_version, detect_neon jitframe.STATICSIZE = JITFRAME_FIXED_SIZE class CPUInfo(object): hf_abi = False arch_version = 6 + neon = False class AbstractARMCPU(AbstractLLCPU): @@ -48,6 +49,7 @@ def setup_once(self): self.cpuinfo.arch_version = detect_arch_version() self.cpuinfo.hf_abi = detect_hardfloat() + self.cpuinfo.neon = detect_neon() #self.codemap.setup() self.assembler.setup_once() diff --git a/rpython/jit/backend/arm/test/test_detect.py b/rpython/jit/backend/arm/test/test_detect.py --- a/rpython/jit/backend/arm/test/test_detect.py +++ b/rpython/jit/backend/arm/test/test_detect.py @@ -1,6 +1,6 @@ import py from rpython.tool.udir import udir -from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.backend.arm.detect import detect_arch_version, getauxval cpuinfo = "Processor : ARMv%d-compatible processor rev 7 (v6l)""" cpuinfo2 = """processor : 0 @@ -29,6 +29,19 @@ address sizes : 36 bits physical, 48 bits virtual power management: """ +# From a Marvell Armada 370/XP +auxv = ( + '\x10\x00\x00\x00\xd7\xa8\x1e\x00\x06\x00\x00\x00\x00\x10\x00\x00\x11\x00' + 
'\x00\x00d\x00\x00\x00\x03\x00\x00\x004\x00\x01\x00\x04\x00\x00\x00 \x00' + '\x00\x00\x05\x00\x00\x00\t\x00\x00\x00\x07\x00\x00\x00\x00\xe0\xf3\xb6' + '\x08\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00t\xcf\x04\x00\x0b\x00\x00' + '\x000\x0c\x00\x00\x0c\x00\x00\x000\x0c\x00\x00\r\x00\x00\x000\x0c\x00\x00' + '\x0e\x00\x00\x000\x0c\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x19\x00\x00' + '\x00\x8a\xf3\x87\xbe\x1a\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\xec' + '\xff\x87\xbe\x0f\x00\x00\x00\x9a\xf3\x87\xbe\x00\x00\x00\x00\x00\x00\x00' + '\x00' +) + def write_cpuinfo(info): filepath = udir.join('get_arch_version') @@ -46,3 +59,10 @@ py.test.raises(ValueError, 'detect_arch_version(write_cpuinfo(cpuinfo % 5))') assert detect_arch_version(write_cpuinfo(cpuinfo2)) == 6 + + +def test_getauxval_no_neon(): + path = udir.join('auxv') + path.write(auxv, 'wb') + AT_HWCAP = 16 + assert getauxval(AT_HWCAP, filename=str(path)) == 2009303 diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -455,7 +455,7 @@ if box is not frame.current_op: value = frame.env[box] else: - value = box.getvalue() # 0 or 0.0 or NULL + value = 0 # box.getvalue() # 0 or 0.0 or NULL else: value = None values.append(value) @@ -472,6 +472,13 @@ # ------------------------------------------------------------ + def setup_descrs(self): + all_descrs = [] + for k, v in self.descrs.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + return all_descrs + def calldescrof(self, FUNC, ARGS, RESULT, effect_info): key = ('call', getkind(RESULT), tuple([getkind(A) for A in ARGS]), diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -331,7 +331,7 @@ counter = self._register_counter(tp, number, token) c_adr = 
ConstInt(rffi.cast(lltype.Signed, counter)) operations.append( - ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None)) + ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr])) def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -21,6 +21,30 @@ self._cache_call = {} self._cache_interiorfield = {} + def setup_descrs(self): + all_descrs = [] + for k, v in self._cache_size.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_field.iteritems(): + for k1, v1 in v.iteritems(): + v1.descr_index = len(all_descrs) + all_descrs.append(v1) + for k, v in self._cache_array.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_arraylen.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_call.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_interiorfield.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + assert len(all_descrs) < 2**15 + return all_descrs + def init_size_descr(self, STRUCT, sizedescr): pass diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -316,6 +316,9 @@ return ll_frame return execute_token + def setup_descrs(self): + return self.gc_ll_descr.setup_descrs() + # ------------------- helpers and descriptions -------------------- @staticmethod diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -683,7 +683,7 @@ for i in range(len(operations)-1, -1, 
-1): op = operations[i] if op.type != 'v': - if op not in last_used and op.has_no_side_effect(): + if op not in last_used and rop.has_no_side_effect(op.opnum): continue opnum = op.getopnum() for j in range(op.numargs()): @@ -695,7 +695,7 @@ if opnum != rop.JUMP and opnum != rop.LABEL: if arg not in last_real_usage: last_real_usage[arg] = i - if op.is_guard(): + if rop.is_guard(op.opnum): for arg in op.getfailargs(): if arg is None: # hole continue @@ -732,14 +732,7 @@ return longevity, last_real_usage def is_comparison_or_ovf_op(opnum): - from rpython.jit.metainterp.resoperation import opclasses - cls = opclasses[opnum] - # hack hack: in theory they are instance method, but they don't use - # any instance field, we can use a fake object - class Fake(cls): - pass - op = Fake() - return op.is_comparison() or op.is_ovf() + return rop.is_comparison(opnum) or rop.is_ovf(opnum) def valid_addressing_size(size): return size == 1 or size == 2 or size == 4 or size == 8 diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -103,7 +103,7 @@ orig_op.set_forwarded(op) replaced = True op.setarg(i, arg) - if op.is_guard(): + if rop.is_guard(op.opnum): if not replaced: op = op.copy_and_change(op.getopnum()) orig_op.set_forwarded(op) @@ -212,7 +212,7 @@ # self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) # op.setarg(1, ConstInt(scale)) # op.setarg(2, v_length) - if op.is_getarrayitem() or \ + if rop.is_getarrayitem(opnum) or \ opnum in (rop.GETARRAYITEM_RAW_I, rop.GETARRAYITEM_RAW_F): self.handle_getarrayitem(op) @@ -324,13 +324,13 @@ if self.transform_to_gc_load(op): continue # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- - if op.is_malloc(): + if rop.is_malloc(op.opnum): self.handle_malloc_operation(op) continue - if (op.is_guard() or + if (rop.is_guard(op.opnum) or self.could_merge_with_next_guard(op, i, 
operations)): self.emit_pending_zeros() - elif op.can_malloc(): + elif rop.can_malloc(op.opnum): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: self.emitting_an_operation_that_can_collect() @@ -370,8 +370,8 @@ # return True in cases where the operation and the following guard # should likely remain together. Simplified version of # can_merge_with_next_guard() in llsupport/regalloc.py. - if not op.is_comparison(): - return op.is_ovf() # int_xxx_ovf() / guard_no_overflow() + if not rop.is_comparison(op.opnum): + return rop.is_ovf(op.opnum) # int_xxx_ovf() / guard_no_overflow() if i + 1 >= len(operations): return False next_op = operations[i + 1] @@ -400,7 +400,6 @@ # it's hard to test all cases). Rewrite it away. value = int(opnum == rop.GUARD_FALSE) op1 = ResOperation(rop.SAME_AS_I, [ConstInt(value)]) - op1.setint(value) self.emit_op(op1) lst = op.getfailargs()[:] lst[i] = op1 @@ -633,8 +632,7 @@ args = [frame, arglist[jd.index_of_virtualizable]] else: args = [frame] - call_asm = ResOperation(op.getopnum(), args, - op.getdescr()) + call_asm = ResOperation(op.getopnum(), args, descr=op.getdescr()) self.replace_op_with(self.get_box_replacement(op), call_asm) self.emit_op(call_asm) @@ -708,7 +706,7 @@ def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() - op = ResOperation(rop.CALL_MALLOC_GC, args, descr) + op = ResOperation(rop.CALL_MALLOC_GC, args, descr=descr) self.replace_op_with(v_result, op) self.emit_op(op) # In general, don't add v_result to write_barrier_applied: diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py @@ -6,6 +6,7 @@ from rpython.rlib import rthread from rpython.translator.translator import TranslationContext from 
rpython.jit.backend.detect_cpu import getcpuclass +from rpython.rlib.rweaklist import RWeakListMixin class CompiledVmprofTest(CCompiledMixin): CPUClass = getcpuclass() @@ -21,6 +22,7 @@ class MyCode: _vmprof_unique_id = 0 + _vmprof_weak_list = RWeakListMixin() ; _vmprof_weak_list.initialize() def __init__(self, name): self.name = name diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -286,7 +286,8 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.fprm.position = i - if op.has_no_side_effect() and op not in self.longevity: + opnum = op.opnum + if rop.has_no_side_effect(opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue @@ -298,8 +299,7 @@ else: self.fprm.temp_boxes.append(box) # - opnum = op.getopnum() - if not we_are_translated() and opnum == -127: + if not we_are_translated() and opnum == rop.FORCE_SPILL: self._consider_force_spill(op) else: arglocs = oplist[opnum](self, op) diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.rtyper import rclass from rpython.jit.backend.test import test_random +from rpython.jit.backend.test.test_random import getint, getref_base, getref from rpython.jit.metainterp.resoperation import ResOperation, rop, optypes from rpython.jit.metainterp.history import ConstInt, ConstPtr, getkind from rpython.jit.codewriter import heaptracker @@ -169,7 +170,7 @@ if length == 0: raise test_random.CannotProduceOperation v_index = r.choice(self.intvars) - if not (0 <= v_index.getint() < length): + if not (0 <= getint(v_index) < length): v_index = ConstInt(r.random_integer() % length) return v_index @@ -311,7 +312,7 @@ def field_descr(self, 
builder, r): v, A = builder.get_structptr_var(r, type=lltype.Array, array_of_structs=True) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) choice = [] for name in A.OF._names: @@ -344,7 +345,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, w], descr) @@ -357,7 +358,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -389,7 +390,7 @@ class GetArrayItemOperation(ArrayOperation): def field_descr(self, builder, r): v, A = builder.get_arrayptr_var(r) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) descr = self.array_descr(builder, A) return v, A, v_index, descr @@ -411,7 +412,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(A.OF, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -455,7 +456,7 @@ v_ptr = builder.do(self.opnum, [v_length]) getattr(builder, self.builder_cache).append(v_ptr) # Initialize the string. Is there a better way to do this? 
- for i in range(v_length.getint()): + for i in range(getint(v_length)): v_index = ConstInt(i) v_char = ConstInt(r.random_integer() % self.max) builder.do(self.set_char, [v_ptr, v_index, v_char]) @@ -471,9 +472,9 @@ current = getattr(builder, self.builder_cache) if current and r.random() < .8: v_string = r.choice(current) - string = v_string.getref(self.ptr) + string = getref(self.ptr, v_string) else: - string = self.alloc(builder.get_index(500, r).getint()) + string = self.alloc(getint(builder.get_index(500, r))) v_string = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, string)) current.append(v_string) for i in range(len(string.chars)): @@ -484,7 +485,7 @@ class AbstractGetItemOperation(AbstractStringOperation): def produce_into(self, builder, r): v_string = self.get_string(builder, r) - v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) builder.do(self.opnum, [v_string, v_index]) class AbstractSetItemOperation(AbstractStringOperation): @@ -492,7 +493,7 @@ v_string = self.get_string(builder, r) if isinstance(v_string, ConstPtr): raise test_random.CannotProduceOperation # setitem(Const, ...) 
- v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) v_target = ConstInt(r.random_integer() % self.max) builder.do(self.opnum, [v_string, v_index, v_target]) @@ -505,15 +506,15 @@ def produce_into(self, builder, r): v_srcstring = self.get_string(builder, r) v_dststring = self.get_string(builder, r) - src = v_srcstring.getref(self.ptr) - dst = v_dststring.getref(self.ptr) + src = getref(self.ptr, v_srcstring) + dst = getref(self.ptr, v_dststring) if src == dst: # because it's not a raise test_random.CannotProduceOperation # memmove(), but memcpy() srclen = len(src.chars) dstlen = len(dst.chars) v_length = builder.get_index(min(srclen, dstlen), r) - v_srcstart = builder.get_index(srclen - v_length.getint() + 1, r) - v_dststart = builder.get_index(dstlen - v_length.getint() + 1, r) + v_srcstart = builder.get_index(srclen - getint(v_length) + 1, r) + v_dststart = builder.get_index(dstlen - getint(v_length) + 1, r) builder.do(self.opnum, [v_srcstring, v_dststring, v_srcstart, v_dststart, v_length]) @@ -585,7 +586,7 @@ """ % funcargs).compile() vtableptr = v._hints['vtable']._as_ptr() d = { - 'ptr': S.getref_base(), + 'ptr': getref_base(S), 'vtable' : vtableptr, 'LLException' : LLException, } diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -11,11 +11,9 @@ from rpython.jit.metainterp.executor import _execute_arglist, wrap_constant from rpython.jit.metainterp.resoperation import opname from rpython.jit.codewriter import longlong -from rpython.rtyper.lltypesystem import lltype, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rstr from rpython.rtyper import rclass -class PleaseRewriteMe(Exception): - pass class DummyLoop(object): def __init__(self, subops): @@ -27,6 +25,41 @@ def execute_raised(self, exc, constant=False): 
self._got_exc = exc + +def getint(v): + if isinstance(v, (ConstInt, InputArgInt)): + return v.getint() + else: + return v._example_int + +def getfloatstorage(v): + if isinstance(v, (ConstFloat, InputArgFloat)): + return v.getfloatstorage() + else: + return v._example_float + +def getfloat(v): + return longlong.getrealfloat(getfloatstorage(v)) + +def getref_base(v): + if isinstance(v, (ConstPtr, InputArgRef)): + return v.getref_base() + else: + return v._example_ref + +def getref(PTR, v): + return lltype.cast_opaque_ptr(PTR, getref_base(v)) + +def constbox(v): + if v.type == INT: + return ConstInt(getint(v)) + if v.type == FLOAT: + return ConstFloat(getfloatstorage(v)) + if v.type == REF: + return ConstPtr(getref_base(v)) + assert 0, v.type + + class OperationBuilder(object): def __init__(self, cpu, loop, vars): self.cpu = cpu @@ -57,11 +90,21 @@ def do(self, opnum, argboxes, descr=None): self.fakemetainterp._got_exc = None op = ResOperation(opnum, argboxes, descr) + argboxes = map(constbox, argboxes) result = _execute_arglist(self.cpu, self.fakemetainterp, opnum, argboxes, descr) if result is not None: - c_result = wrap_constant(result) - op.copy_value_from(c_result) + if lltype.typeOf(result) == lltype.Signed: + op._example_int = result + elif isinstance(result, bool): + op._example_int = int(result) + elif lltype.typeOf(result) == longlong.FLOATSTORAGE: + op._example_float = result + elif isinstance(result, float): + op._example_float = longlong.getfloatstorage(result) + else: + assert lltype.typeOf(result) == llmemory.GCREF + op._example_ref = result self.loop.operations.append(op) return op @@ -101,7 +144,7 @@ if v in names: args.append(names[v]) elif isinstance(v, ConstPtr): - assert not v.getref_base() # otherwise should be in the names + assert not getref_base(v) # otherwise should be in the names args.append('ConstPtr(lltype.nullptr(llmemory.GCREF.TO))') elif isinstance(v, ConstFloat): args.append('ConstFloat(longlong.getfloatstorage(%r))' @@ -198,10 
+241,10 @@ # def writevar(v, nameprefix, init=''): if nameprefix == 'const_ptr': - if not v.getref_base(): + if not getref_base(v): return 'lltype.nullptr(llmemory.GCREF.TO)' - TYPE = v.getref_base()._obj.ORIGTYPE - cont = lltype.cast_opaque_ptr(TYPE, v.getref_base()) + TYPE = getref_base(v)._obj.ORIGTYPE + cont = lltype.cast_opaque_ptr(TYPE, getref_base(v)) if TYPE.TO._is_varsize(): if isinstance(TYPE.TO, lltype.GcStruct): lgt = len(cont.chars) @@ -252,9 +295,9 @@ for i, v in enumerate(self.loop.inputargs): assert not isinstance(v, Const) if v.type == FLOAT: - vals.append("longlong.getfloatstorage(%r)" % v.getfloat()) + vals.append("longlong.getfloatstorage(%r)" % getfloat(v)) else: - vals.append("%r" % v.getint()) + vals.append("%r" % getint(v)) print >>s, ' loop_args = [%s]' % ", ".join(vals) print >>s, ' frame = cpu.execute_token(looptoken, *loop_args)' if self.should_fail_by is None: @@ -264,10 +307,10 @@ for i, v in enumerate(fail_args): if v.type == FLOAT: print >>s, (' assert longlong.getrealfloat(' - 'cpu.get_float_value(frame, %d)) == %r' % (i, v.getfloatstorage())) + 'cpu.get_float_value(frame, %d)) == %r' % (i, getfloatstorage(v))) else: print >>s, (' assert cpu.get_int_value(frame, %d) == %d' - % (i, v.getint())) + % (i, getint(v))) self.names = names s.flush() @@ -295,7 +338,7 @@ builder.intvars.append(v_result) boolres = self.boolres if boolres == 'sometimes': - boolres = v_result.getint() in [0, 1] + boolres = getint(v_result) in [0, 1] if boolres: builder.boolvars.append(v_result) elif v_result.type == FLOAT: @@ -346,10 +389,10 @@ v_second = ConstInt((value & self.and_mask) | self.or_mask) else: v = r.choice(builder.intvars) - v_value = v.getint() + v_value = getint(v) if (v_value & self.and_mask) != v_value: v = builder.do(rop.INT_AND, [v, ConstInt(self.and_mask)]) - v_value = v.getint() + v_value = getint(v) if (v_value | self.or_mask) != v_value: v = builder.do(rop.INT_OR, [v, ConstInt(self.or_mask)]) v_second = v @@ -395,9 +438,9 @@ v_second = 
ConstFloat(r.random_float_storage()) else: v_second = r.choice(builder.floatvars) - if abs(v_first.getfloat()) > 1E100 or abs(v_second.getfloat()) > 1E100: + if abs(getfloat(v_first)) > 1E100 or abs(getfloat(v_second)) > 1E100: raise CannotProduceOperation # avoid infinities - if abs(v_second.getfloat()) < 1E-100: + if abs(getfloat(v_second)) < 1E-100: raise CannotProduceOperation # e.g. division by zero error self.put(builder, [v_first, v_second]) @@ -432,7 +475,7 @@ if not builder.floatvars: raise CannotProduceOperation box = r.choice(builder.floatvars) - if not (-sys.maxint-1 <= box.getfloat() <= sys.maxint): + if not (-sys.maxint-1 <= getfloat(box) <= sys.maxint): raise CannotProduceOperation # would give an overflow self.put(builder, [box]) @@ -440,8 +483,8 @@ def gen_guard(self, builder, r): v = builder.get_bool_var(r) op = ResOperation(self.opnum, [v]) - passing = ((self.opnum == rop.GUARD_TRUE and v.getint()) or - (self.opnum == rop.GUARD_FALSE and not v.getint())) + passing = ((self.opnum == rop.GUARD_TRUE and getint(v)) or + (self.opnum == rop.GUARD_FALSE and not getint(v))) return op, passing def produce_into(self, builder, r): @@ -459,8 +502,8 @@ raise CannotProduceOperation box = r.choice(builder.ptrvars)[0] op = ResOperation(self.opnum, [box]) - passing = ((self.opnum == rop.GUARD_NONNULL and box.getref_base()) or - (self.opnum == rop.GUARD_ISNULL and not box.getref_base())) + passing = ((self.opnum == rop.GUARD_NONNULL and getref_base(box)) or + (self.opnum == rop.GUARD_ISNULL and not getref_base(box))) return op, passing class GuardValueOperation(GuardOperation): @@ -470,14 +513,14 @@ other = r.choice(builder.intvars) else: if r.random() < 0.75: - value = v.getint() + value = getint(v) elif r.random() < 0.5: - value = v.getint() ^ 1 + value = getint(v) ^ 1 else: value = r.random_integer() other = ConstInt(value) op = ResOperation(self.opnum, [v, other]) - return op, (v.getint() == other.getint()) + return op, (getint(v) == getint(other)) # 
____________________________________________________________ @@ -675,7 +718,7 @@ assert not hasattr(loop, '_targettoken') for i in range(position): op = loop.operations[i] - if (not op.has_no_side_effect() + if (not rop.has_no_side_effect(op.opnum) or op.type not in (INT, FLOAT)): position = i break # cannot move the LABEL later @@ -728,9 +771,9 @@ self.expected = {} for v in endvars: if v.type == INT: - self.expected[v] = v.getint() + self.expected[v] = getint(v) elif v.type == FLOAT: - self.expected[v] = v.getfloatstorage() + self.expected[v] = getfloatstorage(v) else: assert 0, v.type @@ -742,7 +785,7 @@ args = [] for box in self.startvars: if box not in self.loop.inputargs: - box = box.constbox() + box = constbox(box) args.append(box) self.cpu.compile_loop(self.loop.inputargs, [ResOperation(rop.JUMP, args, @@ -760,7 +803,7 @@ def clear_state(self): for v, S, fields in self.prebuilt_ptr_consts: - container = v.getref_base()._obj.container + container = getref_base(v)._obj.container for name, value in fields.items(): if isinstance(name, str): setattr(container, name, value) @@ -781,9 +824,9 @@ arguments = [] for box in self.loop.inputargs: if box.type == INT: - arguments.append(box.getint()) + arguments.append(getint(box)) elif box.type == FLOAT: - arguments.append(box.getfloatstorage()) + arguments.append(getfloatstorage(box)) else: assert 0, box.type deadframe = cpu.execute_token(self.runjitcelltoken(), *arguments) @@ -795,7 +838,7 @@ if v not in self.expected: assert v.getopnum() == rop.SAME_AS_I # special case assert isinstance(v.getarg(0), ConstInt) - self.expected[v] = v.getarg(0).getint() + self.expected[v] = getint(v.getarg(0)) if v.type == FLOAT: value = cpu.get_float_value(deadframe, i) else: @@ -807,7 +850,7 @@ ) exc = cpu.grab_exc_value(deadframe) if (self.guard_op is not None and - self.guard_op.is_guard_exception()): + rop.is_guard_exception(self.guard_op.getopnum())): if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION: do_assert(exc, 
"grab_exc_value() should not be %r" % (exc,)) @@ -840,7 +883,7 @@ # generate the branch: a sequence of operations that ends in a FINISH subloop = DummyLoop([]) self.subloops.append(subloop) # keep around for debugging - if guard_op.is_guard_exception(): + if rop.is_guard_exception(guard_op.getopnum()): subloop.operations.append(exc_handling(guard_op)) bridge_builder = self.builder.fork(self.builder.cpu, subloop, op.getfailargs()[:]) @@ -876,9 +919,9 @@ args = [] for x in subset: if x.type == INT: - args.append(InputArgInt(x.getint())) + args.append(InputArgInt(getint(x))) elif x.type == FLOAT: - args.append(InputArgFloat(x.getfloatstorage())) + args.append(InputArgFloat(getfloatstorage(x))) else: assert 0, x.type rl = RandomLoop(self.builder.cpu, self.builder.fork, diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -645,15 +645,28 @@ pass elif gloc is not bloc: self.mov(gloc, bloc) + offset = self.mc.get_relative_pos() self.mc.JMP_l(0) + self.mc.writeimm32(0) self.mc.force_frame_size(DEFAULT_FRAME_BYTES) - offset = self.mc.get_relative_pos() - 4 rawstart = self.materialize_loop(looptoken) - # update the jump to the real trace - self._patch_jump_for_descr(rawstart + offset, asminfo.rawstart) + # update the jump (above) to the real trace + self._patch_jump_to(rawstart + offset, asminfo.rawstart) # update the guard to jump right to this custom piece of assembler self.patch_jump_for_descr(faildescr, rawstart) + def _patch_jump_to(self, adr_jump_offset, adr_new_target): + assert adr_jump_offset != 0 + offset = adr_new_target - (adr_jump_offset + 5) + mc = codebuf.MachineCodeBlockWrapper() + mc.force_frame_size(DEFAULT_FRAME_BYTES) + if rx86.fits_in_32bits(offset): + mc.JMP_l(offset) + else: + mc.MOV_ri(X86_64_SCRATCH_REG.value, adr_new_target) + mc.JMP_r(X86_64_SCRATCH_REG.value) + mc.copy_to_raw_memory(adr_jump_offset) + def 
write_pending_failure_recoveries(self, regalloc): # for each pending guard, generate the code of the recovery stub # at the end of self.mc. @@ -791,10 +804,6 @@ def patch_jump_for_descr(self, faildescr, adr_new_target): adr_jump_offset = faildescr.adr_jump_offset - self._patch_jump_for_descr(adr_jump_offset, adr_new_target) - faildescr.adr_jump_offset = 0 # means "patched" - - def _patch_jump_for_descr(self, adr_jump_offset, adr_new_target): assert adr_jump_offset != 0 offset = adr_new_target - (adr_jump_offset + 4) # If the new target fits within a rel32 of the jump, just patch @@ -815,6 +824,7 @@ p = rffi.cast(rffi.INTP, adr_jump_offset) adr_target = adr_jump_offset + 4 + rffi.cast(lltype.Signed, p[0]) mc.copy_to_raw_memory(adr_target) + faildescr.adr_jump_offset = 0 # means "patched" def fixup_target_tokens(self, rawstart): for targettoken in self.target_tokens_currently_compiling: diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -358,11 +358,11 @@ assert self.assembler.mc._frame_size == DEFAULT_FRAME_BYTES self.rm.position = i self.xrm.position = i - if op.has_no_side_effect() and op not in self.longevity: + if rop.has_no_side_effect(op.opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue - if not we_are_translated() and op.getopnum() == -127: + if not we_are_translated() and op.getopnum() == rop.FORCE_SPILL: self._consider_force_spill(op) else: oplist[op.getopnum()](self, op) diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -33,7 +33,7 @@ def ensure_can_hold_constants(self, asm, op): # allocates 8 bytes in memory for pointers, long integers or floats - if op.is_jit_debug(): + if rop.is_jit_debug(op.getopnum()): return for arg in op.getarglist(): diff --git 
a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -476,7 +476,8 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.fprm.position = i - if op.has_no_side_effect() and op not in self.longevity: + opnum = op.getopnum() + if rop.has_no_side_effect(opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue @@ -488,8 +489,7 @@ else: self.fprm.temp_boxes.append(box) # - opnum = op.getopnum() - if not we_are_translated() and opnum == -127: + if not we_are_translated() and opnum == rop.FORCE_SPILL: self._consider_force_spill(op) else: arglocs = prepare_oplist[opnum](self, op) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -688,6 +688,10 @@ ARRAY = op.args[0].concretetype.TO if self._array_of_voids(ARRAY): return [] + if isinstance(ARRAY, lltype.FixedSizeArray): + raise NotImplementedError( + "%r uses %r, which is not supported by the JIT codewriter" + % (self.graph, ARRAY)) if op.args[0] in self.vable_array_vars: # for virtualizables vars = self.vable_array_vars[op.args[0]] (v_base, arrayfielddescr, arraydescr) = vars @@ -718,6 +722,10 @@ ARRAY = op.args[0].concretetype.TO if self._array_of_voids(ARRAY): return [] + if isinstance(ARRAY, lltype.FixedSizeArray): + raise NotImplementedError( + "%r uses %r, which is not supported by the JIT codewriter" + % (self.graph, ARRAY)) if op.args[0] in self.vable_array_vars: # for virtualizables vars = self.vable_array_vars[op.args[0]] (v_base, arrayfielddescr, arraydescr) = vars diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1316,6 +1316,21 @@ tr = Transformer(None, None) 
py.test.raises(NotImplementedError, tr.rewrite_operation, op) +def test_no_fixedsizearray(): + A = lltype.FixedSizeArray(lltype.Signed, 5) + v_x = varoftype(lltype.Ptr(A)) + op = SpaceOperation('getarrayitem', [v_x, Constant(0, lltype.Signed)], + varoftype(lltype.Signed)) + tr = Transformer(None, None) + tr.graph = 'demo' + py.test.raises(NotImplementedError, tr.rewrite_operation, op) + op = SpaceOperation('setarrayitem', [v_x, Constant(0, lltype.Signed), + Constant(42, lltype.Signed)], + varoftype(lltype.Void)) + e = py.test.raises(NotImplementedError, tr.rewrite_operation, op) + assert str(e.value) == ( + "'demo' uses %r, which is not supported by the JIT codewriter" % (A,)) + def _test_threadlocalref_get(loop_inv): from rpython.rlib.rthread import ThreadLocalField tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_', diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1585,7 +1585,6 @@ def _done_with_this_frame(self): # rare case: we only get there if the blackhole interps all returned # normally (in general we get a ContinueRunningNormally exception). 
- sd = self.builder.metainterp_sd kind = self._return_type if kind == 'v': raise jitexc.DoneWithThisFrameVoid() diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -27,12 +27,11 @@ class CompileData(object): memo = None + log_noopt = True def forget_optimization_info(self): - for arg in self.start_label.getarglist(): + for arg in self.trace.inputargs: arg.set_forwarded(None) - for op in self.operations: - op.set_forwarded(None) class LoopCompileData(CompileData): """ An object that accumulates all of the necessary info for @@ -40,15 +39,13 @@ This is the case of label() ops label() """ - def __init__(self, start_label, end_label, operations, - call_pure_results=None, enable_opts=None): - self.start_label = start_label - self.end_label = end_label + def __init__(self, trace, runtime_boxes, call_pure_results=None, + enable_opts=None): self.enable_opts = enable_opts - assert start_label.getopnum() == rop.LABEL - assert end_label.getopnum() == rop.LABEL - self.operations = operations + self.trace = trace self.call_pure_results = call_pure_results + assert runtime_boxes is not None + self.runtime_boxes = runtime_boxes def optimize(self, metainterp_sd, jitdriver_sd, optimizations, unroll): from rpython.jit.metainterp.optimizeopt.unroll import (UnrollOptimizer, @@ -56,23 +53,21 @@ if unroll: opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.optimize_preamble(self.start_label, self.end_label, - self.operations, + return opt.optimize_preamble(self.trace, + self.runtime_boxes, self.call_pure_results, self.box_names_memo) else: opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.propagate_all_forward(self.start_label.getarglist(), - self.operations, self.call_pure_results) + return opt.propagate_all_forward(self.trace, self.call_pure_results) class SimpleCompileData(CompileData): """ This represents label() ops 
jump with no extra info associated with the label """ - def __init__(self, start_label, operations, call_pure_results=None, + def __init__(self, trace, call_pure_results=None, enable_opts=None): - self.start_label = start_label - self.operations = operations + self.trace = trace self.call_pure_results = call_pure_results self.enable_opts = enable_opts @@ -81,17 +76,17 @@ #assert not unroll opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.propagate_all_forward(self.start_label.getarglist(), - self.operations, self.call_pure_results) + return opt.propagate_all_forward(self.trace.get_iter(), + self.call_pure_results) class BridgeCompileData(CompileData): """ This represents ops() with a jump at the end that goes to some From pypy.commits at gmail.com Tue Apr 5 14:15:15 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 05 Apr 2016 11:15:15 -0700 (PDT) Subject: [pypy-commit] pypy py3k: port test to py3 Message-ID: <570400b3.03dd1c0a.ad9cf.ffffa22a@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83528:a252fd879fe7 Date: 2016-04-05 19:14 +0100 http://bitbucket.org/pypy/pypy/changeset/a252fd879fe7/ Log: port test to py3 diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -54,16 +54,16 @@ import sys module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) - buf = buffer(arr) + buf = memoryview(arr) exc = raises(TypeError, "buf[1] = '1'") - assert str(exc.value) == "buffer is read-only" + assert str(exc.value) == "cannot modify read-only memory" if sys.byteorder == 'big': - assert str(buf) == (b'\0\0\0\x01' + assert bytes(buf) == (b'\0\0\0\x01' b'\0\0\0\x02' b'\0\0\0\x03' b'\0\0\0\x04') else: - assert str(buf) == (b'\x01\0\0\0' + assert bytes(buf) == (b'\x01\0\0\0' b'\x02\0\0\0' b'\x03\0\0\0' b'\x04\0\0\0') From pypy.commits at gmail.com Tue Apr 5 18:12:05 2016 
From: pypy.commits at gmail.com (stefanor) Date: Tue, 05 Apr 2016 15:12:05 -0700 (PDT) Subject: [pypy-commit] pypy default: The JIT detect_cpu function throws exceptions on some CPUs (that don't support JIT), so it can't be used in non-JIT contexts Message-ID: <57043835.519d1c0a.95986.ffff817c@mx.google.com> Author: Stefano Rivera Branch: Changeset: r83529:2179c165ab7b Date: 2016-04-05 15:11 -0700 http://bitbucket.org/pypy/pypy/changeset/2179c165ab7b/ Log: The JIT detect_cpu function throws exceptions on some CPUs (that don't support JIT), so it can't be used in non-JIT contexts diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -1,3 +1,4 @@ +import platform as host_platform import py import sys from rpython.tool.udir import udir @@ -28,8 +29,7 @@ def setup(): - from rpython.jit.backend import detect_cpu - if detect_cpu.autodetect().startswith(detect_cpu.MODEL_S390_64): + if host_platform.machine() == 's390x': raise VMProfPlatformUnsupported("rvmprof not supported on" " s390x CPUs for now") compile_extra = ['-DRPYTHON_LL2CTYPES'] From pypy.commits at gmail.com Wed Apr 6 00:05:32 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Apr 2016 21:05:32 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: tweaks Message-ID: <57048b0c.0173c20a.d3d86.3aea@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5631:2645adce9224 Date: 2016-04-06 08:05 +0300 http://bitbucket.org/pypy/extradoc/changeset/2645adce9224/ Log: tweaks diff --git a/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt b/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt --- a/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt +++ b/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt @@ -1,4 +1,6 @@ -pypy's assembler backend +======================== +PyPy's assembler backend +======================== input: linear sequence of instructions, called a "trace". 
@@ -76,9 +78,9 @@ ## GC pointers -Around most CALL instructions, we need to record a description of where the GC pointers are (registers and stack frame). This is needed in case the CALL invokes a garbage collection. The GC pointers can move; the positions in the registers and stack frame are fixed by the GC. That's a reason for why we don't have explicit interior pointers. +Around most CALL instructions, we need to record a description of where the GC pointers are (registers and stack frame). This is needed in case the CALL invokes a garbage collection. The GC pointers can move; the pointers in the registers and stack frame are updated by the GC. That's a reason for why we don't have explicit interior pointers. -GC pointers can appear as constants in the trace. We are busy changing that to use a constant table and MOV REG, (%RIP+offset). The "constant" table can actually change if the GC objects move. +GC pointers can appear as constants in the trace. We are busy changing that to use a constant table and MOV REG, (%RIP+offset). The "constant" in the table is actually updated by the GC if the object move. ## Vectorization From pypy.commits at gmail.com Wed Apr 6 00:15:23 2016 From: pypy.commits at gmail.com (Michael McGee) Date: Tue, 05 Apr 2016 21:15:23 -0700 (PDT) Subject: [pypy-commit] cffi default: Change get_current_ts to always use _Py_atomic_load_relaxed when available. Message-ID: <57048d5b.84c9c20a.ae82d.380e@mx.google.com> Author: Michael McGee Branch: Changeset: r2656:aca84fd9f4e4 Date: 2016-04-05 15:57 -0700 http://bitbucket.org/cffi/cffi/changeset/aca84fd9f4e4/ Log: Change get_current_ts to always use _Py_atomic_load_relaxed when available. 
diff --git a/c/misc_thread_common.h b/c/misc_thread_common.h --- a/c/misc_thread_common.h +++ b/c/misc_thread_common.h @@ -80,7 +80,7 @@ static PyThreadState *get_current_ts(void) { -#if PY_MAJOR_VERSION >= 3 +#if defined(_Py_atomic_load_relaxed) return (PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current); #else return _PyThreadState_Current; From pypy.commits at gmail.com Wed Apr 6 02:36:36 2016 From: pypy.commits at gmail.com (Raemi) Date: Tue, 05 Apr 2016 23:36:36 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: make jit backend test for vmprof pass. however, we are still missing resetting Message-ID: <5704ae74.972e1c0a.44568.ffff8711@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r83530:8488cce5d3cf Date: 2016-04-06 09:35 +0300 http://bitbucket.org/pypy/pypy/changeset/8488cce5d3cf/ Log: make jit backend test for vmprof pass. however, we are still missing resetting the thread locals used by vmprof on transaction abort. diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py @@ -6,6 +6,8 @@ from rpython.rlib import rthread from rpython.translator.translator import TranslationContext from rpython.jit.backend.detect_cpu import getcpuclass +from rpython.rlib.rweaklist import RWeakListMixin + class CompiledVmprofTest(CCompiledMixin): CPUClass = getcpuclass() @@ -21,6 +23,7 @@ class MyCode: _vmprof_unique_id = 0 + _vmprof_weak_list = RWeakListMixin() ; _vmprof_weak_list.initialize() def __init__(self, name): self.name = name diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1020,15 +1020,15 @@ self.mc.LEA_rs(eax.value, (FRAME_FIXED_SIZE - 4) * WORD) # old = current value of vmprof_tl_stack offset = 
cintf.vmprof_tl_stack.getoffset() - self.mc.MOV_rm(old.value, (tloc.value, offset)) + self.mc.MOV_rm(old.value, (self.SEGMENT_NO, tloc.value, offset)) # eax->next = old - self.mc.MOV_mr((eax.value, 0), old.value) + self.mc.MOV_mr((self.SEGMENT_NO, eax.value, 0), old.value) # eax->value = my esp - self.mc.MOV_mr((eax.value, WORD), esp.value) + self.mc.MOV_mr((self.SEGMENT_NO, eax.value, WORD), esp.value) # eax->kind = VMPROF_JITTED_TAG - self.mc.MOV_mi((eax.value, WORD * 2), VMPROF_JITTED_TAG) + self.mc.MOV_mi((self.SEGMENT_NO, eax.value, WORD * 2), VMPROF_JITTED_TAG) # save in vmprof_tl_stack the new eax - self.mc.MOV_mr((tloc.value, offset), eax.value) + self.mc.MOV_mr((self.SEGMENT_NO, tloc.value, offset), eax.value) def _call_footer_vmprof(self): from rpython.rlib.rvmprof.rvmprof import cintf @@ -1039,14 +1039,14 @@ self.mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE - 4 + 0) * WORD) # save in vmprof_tl_stack the value eax offset = cintf.vmprof_tl_stack.getoffset() - self.mc.MOV_mr((edx.value, offset), eax.value) + self.mc.MOV_mr((self.SEGMENT_NO, edx.value, offset), eax.value) def _call_header(self): self.mc.SUB_ri(esp.value, self._get_whole_frame_size() * WORD) self.mc.MOV_sr(PASS_ON_MY_FRAME * WORD, ebp.value) if IS_X86_64: self.mc.MOV_sr(THREADLOCAL_OFS, esi.value) - if not self.cpu.gc_ll_descr.stm and self.cpu.translate_support_code: + if self.cpu.translate_support_code: self._call_header_vmprof() # on X86_64, this uses esi if IS_X86_64: self.mc.MOV_rr(ebp.value, edi.value) @@ -1086,7 +1086,7 @@ self._call_footer_shadowstack() # the return value is the jitframe - if not self.cpu.gc_ll_descr.stm and self.cpu.translate_support_code: + if self.cpu.translate_support_code: self._call_footer_vmprof() self.mc.MOV_rr(eax.value, ebp.value) From pypy.commits at gmail.com Wed Apr 6 02:37:07 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Apr 2016 23:37:07 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-history-rewriting: Reorganize do_residual_call() producing 
Message-ID: <5704ae93.839a1c0a.5d8d0.7fe1@mx.google.com> Author: Armin Rigo Branch: cleanup-history-rewriting Changeset: r83531:773e8eda8298 Date: 2016-04-06 09:26 +0300 http://bitbucket.org/pypy/pypy/changeset/773e8eda8298/ Log: Reorganize do_residual_call() producing call_may_force/call_release_gil/libffi calls etc. diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1592,43 +1592,68 @@ resbox = self._do_jit_force_virtual(allboxes, descr, pc) if resbox is not None: return resbox + + # 1. preparation self.metainterp.vable_and_vrefs_before_residual_call() + + # 2. actually do the call now (we'll have cases later): the + # result is stored into 'c_result' for now, which is a Const + cpu = self.metainterp.cpu tp = descr.get_normalized_result_type() - resbox = NOT_HANDLED - opnum = -1 - if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: - opnum = rop.call_may_force_for_descr(descr) - resbox = self.metainterp.direct_libffi_call(allboxes, descr, - tp) - if resbox is NOT_HANDLED: - if effectinfo.is_call_release_gil(): - opnum = rop.call_release_gil_for_descr(descr) - resbox = self.metainterp.direct_call_release_gil(allboxes, - descr, tp) - elif tp == 'i': - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_I, allboxes, descr=descr) - elif tp == 'r': - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_R, allboxes, descr=descr) - elif tp == 'f': - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_F, allboxes, descr=descr) - elif tp == 'v': - self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_N, allboxes, descr=descr) - resbox = None - else: - assert False - if opnum == -1: - opnum = rop.call_may_force_for_descr(descr) - cut_pos = self.metainterp.vrefs_after_residual_call( - self.metainterp._last_op, opnum, allboxes, descr, cut_pos) - vablebox = None + if tp == 'i': + 
opnum1 = rop.CALL_MAY_FORCE_I + value = executor.execute_varargs(cpu, self, opnum1, + allboxes, descr) + c_result = ConstInt(value) + elif tp == 'r': + opnum1 = rop.CALL_MAY_FORCE_R + value = executor.execute_varargs(cpu, self, opnum1, + allboxes, descr) + c_result = ConstPtr(value) + elif tp == 'f': + opnum1 = rop.CALL_MAY_FORCE_F + value = executor.execute_varargs(cpu, self, opnum1, + allboxes, descr) + c_result = ConstFloat(value) + elif tp == 'v': + opnum1 = rop.CALL_MAY_FORCE_N + executor.execute_varargs(cpu, self, opnum1, + allboxes, descr) + c_result = None + + # 3. after this call, check the vrefs. If any have been + # forced by the call, then we record in the trace a + # VIRTUAL_REF_FINISH---before we record any CALL + self.metainterp.vrefs_after_residual_call() + + # 4. figure out what kind of CALL we need to record + # from the effectinfo and the 'assembler_call' flag if assembler_call: vablebox, resbox = self.metainterp.direct_assembler_call( - self.metainterp._last_op, allboxes, descr, assembler_call_jd, cut_pos) - if resbox and resbox.type != 'v': + allboxes, descr, assembler_call_jd) + else: + vablebox = None + resbox = None + if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: + resbox = self.metainterp.direct_libffi_call(allboxes, descr) + # ^^^ may return None to mean "can't handle it myself" + if resbox is None: + if effectinfo.is_call_release_gil(): + resbox = self.metainterp.direct_call_release_gil( + allboxes, descr) + else: + resbox = self.metainterp.direct_call_may_force( + allboxes, descr) + + # 5. invalidate the heapcache based on the CALL_MAY_FORCE + # operation executed above in step 2 + self.metainterp.heapcache.invalidate_caches(opnum1, descr, allboxes) + + # 6. 
put 'c_result' back into the recorded operation + if resbox.type == 'v': + resbox = None # for void calls, must return None below + else: + resbox.copy_value_from(c_result) self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call(funcbox) self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) @@ -2170,7 +2195,6 @@ profiler.count_ops(opnum, Counters.RECORDED_OPS) self.heapcache.invalidate_caches(opnum, descr, argboxes) op = self.history.record(opnum, argboxes, resvalue, descr) - self._last_op = op self.attach_debug_info(op) if op.type != 'v': return op @@ -2781,7 +2805,7 @@ force_token], None, descr=vinfo.vable_token_descr) - def vrefs_after_residual_call(self, op, opnum, arglist, descr, cut_pos): + def vrefs_after_residual_call(self): vrefinfo = self.staticdata.virtualref_info for i in range(0, len(self.virtualref_boxes), 2): vrefbox = self.virtualref_boxes[i+1] @@ -2791,9 +2815,7 @@ # during this CALL_MAY_FORCE. Mark this fact by # generating a VIRTUAL_REF_FINISH on it and replacing # it by ConstPtr(NULL). - cut_pos = self.stop_tracking_virtualref(i, op, opnum, arglist, - descr, cut_pos) - return cut_pos + self.stop_tracking_virtualref(i) def vable_after_residual_call(self, funcbox): vinfo = self.jitdriver_sd.virtualizable_info @@ -2817,19 +2839,14 @@ # have the eventual exception raised (this is normally done # after the call to vable_after_residual_call()). 
- def stop_tracking_virtualref(self, i, op, opnum, arglist, descr, cut_pos): + def stop_tracking_virtualref(self, i): virtualbox = self.virtualref_boxes[i] vrefbox = self.virtualref_boxes[i+1] - # record VIRTUAL_REF_FINISH just before the current CALL_MAY_FORCE - self.history.cut(cut_pos) # pop the CALL - self.history.record_nospec(rop.VIRTUAL_REF_FINISH, - [vrefbox, virtualbox], None) - cut_pos = self.history.get_trace_position() - newop = self.history.record_nospec(opnum, arglist, descr) - op.set_position(newop.get_position()) - # mark by replacing it with ConstPtr(NULL) + # record VIRTUAL_REF_FINISH here, which is before the actual + # CALL_xxx is recorded + self.history.record(rop.VIRTUAL_REF_FINISH, [vrefbox, virtualbox], None) + # mark this situation by replacing the vrefbox with ConstPtr(NULL) self.virtualref_boxes[i+1] = self.cpu.ts.CONST_NULL - return cut_pos def handle_possible_exception(self): if self.last_exc_value: @@ -3026,24 +3043,26 @@ newop.copy_value_from(op) return newop - def direct_assembler_call(self, op, arglist, descr, targetjitdriver_sd, cut_pos): - """ Generate a direct call to assembler for portal entry point, - patching the CALL_MAY_FORCE that occurred just now. + def direct_call_may_force(self, argboxes, calldescr): + """ Common case: record in the history a CALL_MAY_FORCE with + 'c_result' as the result of that call. (The actual call has + already been done.) """ - self.history.cut(cut_pos) + opnum = rop.call_may_force_for_descr(calldescr) + return self.history.record_nospec(opnum, argboxes, calldescr) + + def direct_assembler_call(self, arglist, calldescr, targetjitdriver_sd): + """ Record in the history a direct call to assembler for portal + entry point. 
+ """ num_green_args = targetjitdriver_sd.num_green_args greenargs = arglist[1:num_green_args+1] args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args warmrunnerstate = targetjitdriver_sd.warmstate token = warmrunnerstate.get_assembler_token(greenargs) - opnum = OpHelpers.call_assembler_for_descr(descr) - oldop = op + opnum = OpHelpers.call_assembler_for_descr(calldescr) op = self.history.record_nospec(opnum, args, descr=token) - if opnum == rop.CALL_ASSEMBLER_N: - op = None - else: - op.copy_value_from(oldop) # # To fix an obscure issue, make sure the vable stays alive # longer than the CALL_ASSEMBLER operation. We do it by @@ -3054,7 +3073,7 @@ else: return None, op - def direct_libffi_call(self, argboxes, orig_calldescr, tp): + def direct_libffi_call(self, argboxes, orig_calldescr, c_result): """Generate a direct call to C code using jit_ffi_call() """ # an 'assert' that constant-folds away the rest of this function @@ -3067,7 +3086,7 @@ # box_cif_description = argboxes[1] if not isinstance(box_cif_description, ConstInt): - return NOT_HANDLED + return None # cannot be handled by direct_libffi_call() cif_description = box_cif_description.getint() cif_description = llmemory.cast_int_to_adr(cif_description) cif_description = llmemory.cast_adr_to_ptr(cif_description, @@ -3075,7 +3094,7 @@ extrainfo = orig_calldescr.get_extra_info() calldescr = self.cpu.calldescrof_dynamic(cif_description, extrainfo) if calldescr is None: - return NOT_HANDLED + return None # cannot be handled by direct_libffi_call() # box_exchange_buffer = argboxes[3] arg_boxes = [] @@ -3106,68 +3125,25 @@ # (that is, errno and SetLastError/GetLastError on Windows) # Note these flags match the ones in clibffi.ll_callback c_saveall = ConstInt(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) - if tp == 'i': - value = executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_I, - argboxes, orig_calldescr) - box_result = self.history.record( - rop.CALL_RELEASE_GIL_I, 
[c_saveall, argboxes[2]] + arg_boxes, - value, descr=calldescr) - elif tp == 'f': - value = executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_F, - argboxes, orig_calldescr) - box_result = self.history.record( - rop.CALL_RELEASE_GIL_F, [c_saveall, argboxes[2]] + arg_boxes, - value, descr=calldescr) - elif tp == 'v': - executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_N, - argboxes, orig_calldescr) - self.history.record( - rop.CALL_RELEASE_GIL_N, [c_saveall, argboxes[2]] + arg_boxes, - None, descr=calldescr) - box_result = None - else: - assert False - # + opnum = rop.call_release_gil_for_descr(orig_calldescr) + assert opnum == rop.call_release_gil_for_descr(calldescr) + return self.history.record_nospec(opnum, + [c_saveall, argboxes[2]] + arg_boxes, + calldescr) # note that the result is written back to the exchange_buffer by the # following operation, which should be a raw_store - return box_result - - def direct_call_release_gil(self, argboxes, calldescr, tp): + + def direct_call_release_gil(self, argboxes, calldescr): + if not we_are_translated(): # for llgraph + calldescr._original_func_ = argboxes[0].getint() effectinfo = calldescr.get_extra_info() realfuncaddr, saveerr = effectinfo.call_release_gil_target funcbox = ConstInt(heaptracker.adr2int(realfuncaddr)) savebox = ConstInt(saveerr) - if tp == 'i': - value = executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_I, - argboxes, calldescr) - resbox = self.history.record(rop.CALL_RELEASE_GIL_I, - [savebox, funcbox] + argboxes[1:], - value, calldescr) - elif tp == 'f': - value = executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_F, - argboxes, calldescr) - resbox = self.history.record(rop.CALL_RELEASE_GIL_F, - [savebox, funcbox] + argboxes[1:], - value, calldescr) - elif tp == 'v': - executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_N, - argboxes, calldescr) - self.history.record(rop.CALL_RELEASE_GIL_N, - [savebox, funcbox] + argboxes[1:], - None, calldescr) 
- resbox = None - else: - assert False, "no CALL_RELEASE_GIL_R" - - if not we_are_translated(): # for llgraph - calldescr._original_func_ = argboxes[0].getint() - return resbox + opnum = rop.call_release_gil_for_descr(calldescr) + return self.history.record_nospec(opnum, + [savebox, funcbox] + argboxes[1:], + calldescr) def do_not_in_trace_call(self, allboxes, descr): self.clear_exception() @@ -3187,8 +3163,6 @@ """Raised after we mutated metainterp.framestack, in order to force it to reload the current top-of-stack frame that gets interpreted.""" -NOT_HANDLED = history.CONST_FALSE - # ____________________________________________________________ def _get_opimpl_method(name, argcodes): From pypy.commits at gmail.com Wed Apr 6 03:13:30 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Apr 2016 00:13:30 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-history-rewriting: fix fix Message-ID: <5704b71a.e853c20a.43df4.6c7c@mx.google.com> Author: Armin Rigo Branch: cleanup-history-rewriting Changeset: r83532:e89fe690dd02 Date: 2016-04-06 10:12 +0300 http://bitbucket.org/pypy/pypy/changeset/e89fe690dd02/ Log: fix fix diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1598,28 +1598,30 @@ # 2. 
actually do the call now (we'll have cases later): the # result is stored into 'c_result' for now, which is a Const - cpu = self.metainterp.cpu + metainterp = self.metainterp tp = descr.get_normalized_result_type() if tp == 'i': opnum1 = rop.CALL_MAY_FORCE_I - value = executor.execute_varargs(cpu, self, opnum1, - allboxes, descr) + value = executor.execute_varargs(metainterp.cpu, metainterp, + opnum1, allboxes, descr) c_result = ConstInt(value) elif tp == 'r': opnum1 = rop.CALL_MAY_FORCE_R - value = executor.execute_varargs(cpu, self, opnum1, - allboxes, descr) + value = executor.execute_varargs(metainterp.cpu, metainterp, + opnum1, allboxes, descr) c_result = ConstPtr(value) elif tp == 'f': opnum1 = rop.CALL_MAY_FORCE_F - value = executor.execute_varargs(cpu, self, opnum1, - allboxes, descr) + value = executor.execute_varargs(metainterp.cpu, metainterp, + opnum1, allboxes, descr) c_result = ConstFloat(value) elif tp == 'v': opnum1 = rop.CALL_MAY_FORCE_N - executor.execute_varargs(cpu, self, opnum1, - allboxes, descr) + executor.execute_varargs(metainterp.cpu, metainterp, + opnum1, allboxes, descr) c_result = None + else: + assert False # 3. after this call, check the vrefs. 
If any have been # forced by the call, then we record in the trace a @@ -3073,7 +3075,7 @@ else: return None, op - def direct_libffi_call(self, argboxes, orig_calldescr, c_result): + def direct_libffi_call(self, argboxes, orig_calldescr): """Generate a direct call to C code using jit_ffi_call() """ # an 'assert' that constant-folds away the rest of this function From pypy.commits at gmail.com Wed Apr 6 03:14:43 2016 From: pypy.commits at gmail.com (Raemi) Date: Wed, 06 Apr 2016 00:14:43 -0700 (PDT) Subject: [pypy-commit] pypy default: try to fix this test that previously used to enumerate all code objs by tracing Message-ID: <5704b763.839a1c0a.5d8d0.ffff8d17@mx.google.com> Author: Remi Meier Branch: Changeset: r83533:ac58de5d526f Date: 2016-04-06 10:13 +0300 http://bitbucket.org/pypy/pypy/changeset/ac58de5d526f/ Log: try to fix this test that previously used to enumerate all code objs by tracing the heap and now uses a weakref list. The difference is that weakrefs may point to unreachable objs and we therefore count more than with the other method. 
diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -14,7 +14,7 @@ tmpfile2 = open(self.tmpfilename2, 'wb') tmpfileno2 = tmpfile2.fileno() - import struct, sys + import struct, sys, gc WORD = struct.calcsize('l') @@ -46,6 +46,8 @@ return count import _vmprof + gc.collect() # try to make the weakref list deterministic + gc.collect() # by freeing all dead code objects _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() @@ -57,6 +59,8 @@ pass """ in d + gc.collect() + gc.collect() _vmprof.enable(tmpfileno2, 0.01) exec """def foo2(): From pypy.commits at gmail.com Wed Apr 6 03:29:05 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Apr 2016 00:29:05 -0700 (PDT) Subject: [pypy-commit] pypy default: hg merge cleanup-history-rewriting Message-ID: <5704bac1.aaf8c20a.6370f.7194@mx.google.com> Author: Armin Rigo Branch: Changeset: r83535:58a5417201bf Date: 2016-04-06 10:28 +0300 http://bitbucket.org/pypy/pypy/changeset/58a5417201bf/ Log: hg merge cleanup-history-rewriting Remove a headache of history rewriting in pyjitpl diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1592,43 +1592,70 @@ resbox = self._do_jit_force_virtual(allboxes, descr, pc) if resbox is not None: return resbox + + # 1. preparation self.metainterp.vable_and_vrefs_before_residual_call() + + # 2. 
actually do the call now (we'll have cases later): the + # result is stored into 'c_result' for now, which is a Const + metainterp = self.metainterp tp = descr.get_normalized_result_type() - resbox = NOT_HANDLED - opnum = -1 - if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: - opnum = rop.call_may_force_for_descr(descr) - resbox = self.metainterp.direct_libffi_call(allboxes, descr, - tp) - if resbox is NOT_HANDLED: - if effectinfo.is_call_release_gil(): - opnum = rop.call_release_gil_for_descr(descr) - resbox = self.metainterp.direct_call_release_gil(allboxes, - descr, tp) - elif tp == 'i': - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_I, allboxes, descr=descr) - elif tp == 'r': - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_R, allboxes, descr=descr) - elif tp == 'f': - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_F, allboxes, descr=descr) - elif tp == 'v': - self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_N, allboxes, descr=descr) - resbox = None - else: - assert False - if opnum == -1: - opnum = rop.call_may_force_for_descr(descr) - cut_pos = self.metainterp.vrefs_after_residual_call( - self.metainterp._last_op, opnum, allboxes, descr, cut_pos) - vablebox = None + if tp == 'i': + opnum1 = rop.CALL_MAY_FORCE_I + value = executor.execute_varargs(metainterp.cpu, metainterp, + opnum1, allboxes, descr) + c_result = ConstInt(value) + elif tp == 'r': + opnum1 = rop.CALL_MAY_FORCE_R + value = executor.execute_varargs(metainterp.cpu, metainterp, + opnum1, allboxes, descr) + c_result = ConstPtr(value) + elif tp == 'f': + opnum1 = rop.CALL_MAY_FORCE_F + value = executor.execute_varargs(metainterp.cpu, metainterp, + opnum1, allboxes, descr) + c_result = ConstFloat(value) + elif tp == 'v': + opnum1 = rop.CALL_MAY_FORCE_N + executor.execute_varargs(metainterp.cpu, metainterp, + opnum1, allboxes, descr) + c_result = None + else: + assert False + + # 3. 
after this call, check the vrefs. If any have been + # forced by the call, then we record in the trace a + # VIRTUAL_REF_FINISH---before we record any CALL + self.metainterp.vrefs_after_residual_call() + + # 4. figure out what kind of CALL we need to record + # from the effectinfo and the 'assembler_call' flag if assembler_call: vablebox, resbox = self.metainterp.direct_assembler_call( - self.metainterp._last_op, allboxes, descr, assembler_call_jd, cut_pos) - if resbox and resbox.type != 'v': + allboxes, descr, assembler_call_jd) + else: + vablebox = None + resbox = None + if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: + resbox = self.metainterp.direct_libffi_call(allboxes, descr) + # ^^^ may return None to mean "can't handle it myself" + if resbox is None: + if effectinfo.is_call_release_gil(): + resbox = self.metainterp.direct_call_release_gil( + allboxes, descr) + else: + resbox = self.metainterp.direct_call_may_force( + allboxes, descr) + + # 5. invalidate the heapcache based on the CALL_MAY_FORCE + # operation executed above in step 2 + self.metainterp.heapcache.invalidate_caches(opnum1, descr, allboxes) + + # 6. 
put 'c_result' back into the recorded operation + if resbox.type == 'v': + resbox = None # for void calls, must return None below + else: + resbox.copy_value_from(c_result) self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call(funcbox) self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) @@ -2170,7 +2197,6 @@ profiler.count_ops(opnum, Counters.RECORDED_OPS) self.heapcache.invalidate_caches(opnum, descr, argboxes) op = self.history.record(opnum, argboxes, resvalue, descr) - self._last_op = op self.attach_debug_info(op) if op.type != 'v': return op @@ -2781,7 +2807,7 @@ force_token], None, descr=vinfo.vable_token_descr) - def vrefs_after_residual_call(self, op, opnum, arglist, descr, cut_pos): + def vrefs_after_residual_call(self): vrefinfo = self.staticdata.virtualref_info for i in range(0, len(self.virtualref_boxes), 2): vrefbox = self.virtualref_boxes[i+1] @@ -2791,9 +2817,7 @@ # during this CALL_MAY_FORCE. Mark this fact by # generating a VIRTUAL_REF_FINISH on it and replacing # it by ConstPtr(NULL). - cut_pos = self.stop_tracking_virtualref(i, op, opnum, arglist, - descr, cut_pos) - return cut_pos + self.stop_tracking_virtualref(i) def vable_after_residual_call(self, funcbox): vinfo = self.jitdriver_sd.virtualizable_info @@ -2817,19 +2841,14 @@ # have the eventual exception raised (this is normally done # after the call to vable_after_residual_call()). 
- def stop_tracking_virtualref(self, i, op, opnum, arglist, descr, cut_pos): + def stop_tracking_virtualref(self, i): virtualbox = self.virtualref_boxes[i] vrefbox = self.virtualref_boxes[i+1] - # record VIRTUAL_REF_FINISH just before the current CALL_MAY_FORCE - self.history.cut(cut_pos) # pop the CALL - self.history.record_nospec(rop.VIRTUAL_REF_FINISH, - [vrefbox, virtualbox], None) - cut_pos = self.history.get_trace_position() - newop = self.history.record_nospec(opnum, arglist, descr) - op.set_position(newop.get_position()) - # mark by replacing it with ConstPtr(NULL) + # record VIRTUAL_REF_FINISH here, which is before the actual + # CALL_xxx is recorded + self.history.record(rop.VIRTUAL_REF_FINISH, [vrefbox, virtualbox], None) + # mark this situation by replacing the vrefbox with ConstPtr(NULL) self.virtualref_boxes[i+1] = self.cpu.ts.CONST_NULL - return cut_pos def handle_possible_exception(self): if self.last_exc_value: @@ -3026,24 +3045,26 @@ newop.copy_value_from(op) return newop - def direct_assembler_call(self, op, arglist, descr, targetjitdriver_sd, cut_pos): - """ Generate a direct call to assembler for portal entry point, - patching the CALL_MAY_FORCE that occurred just now. + def direct_call_may_force(self, argboxes, calldescr): + """ Common case: record in the history a CALL_MAY_FORCE with + 'c_result' as the result of that call. (The actual call has + already been done.) """ - self.history.cut(cut_pos) + opnum = rop.call_may_force_for_descr(calldescr) + return self.history.record_nospec(opnum, argboxes, calldescr) + + def direct_assembler_call(self, arglist, calldescr, targetjitdriver_sd): + """ Record in the history a direct call to assembler for portal + entry point. 
+ """ num_green_args = targetjitdriver_sd.num_green_args greenargs = arglist[1:num_green_args+1] args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args warmrunnerstate = targetjitdriver_sd.warmstate token = warmrunnerstate.get_assembler_token(greenargs) - opnum = OpHelpers.call_assembler_for_descr(descr) - oldop = op + opnum = OpHelpers.call_assembler_for_descr(calldescr) op = self.history.record_nospec(opnum, args, descr=token) - if opnum == rop.CALL_ASSEMBLER_N: - op = None - else: - op.copy_value_from(oldop) # # To fix an obscure issue, make sure the vable stays alive # longer than the CALL_ASSEMBLER operation. We do it by @@ -3054,7 +3075,7 @@ else: return None, op - def direct_libffi_call(self, argboxes, orig_calldescr, tp): + def direct_libffi_call(self, argboxes, orig_calldescr): """Generate a direct call to C code using jit_ffi_call() """ # an 'assert' that constant-folds away the rest of this function @@ -3067,7 +3088,7 @@ # box_cif_description = argboxes[1] if not isinstance(box_cif_description, ConstInt): - return NOT_HANDLED + return None # cannot be handled by direct_libffi_call() cif_description = box_cif_description.getint() cif_description = llmemory.cast_int_to_adr(cif_description) cif_description = llmemory.cast_adr_to_ptr(cif_description, @@ -3075,7 +3096,7 @@ extrainfo = orig_calldescr.get_extra_info() calldescr = self.cpu.calldescrof_dynamic(cif_description, extrainfo) if calldescr is None: - return NOT_HANDLED + return None # cannot be handled by direct_libffi_call() # box_exchange_buffer = argboxes[3] arg_boxes = [] @@ -3106,68 +3127,25 @@ # (that is, errno and SetLastError/GetLastError on Windows) # Note these flags match the ones in clibffi.ll_callback c_saveall = ConstInt(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) - if tp == 'i': - value = executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_I, - argboxes, orig_calldescr) - box_result = self.history.record( - rop.CALL_RELEASE_GIL_I, [c_saveall, 
argboxes[2]] + arg_boxes, - value, descr=calldescr) - elif tp == 'f': - value = executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_F, - argboxes, orig_calldescr) - box_result = self.history.record( - rop.CALL_RELEASE_GIL_F, [c_saveall, argboxes[2]] + arg_boxes, - value, descr=calldescr) - elif tp == 'v': - executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_N, - argboxes, orig_calldescr) - self.history.record( - rop.CALL_RELEASE_GIL_N, [c_saveall, argboxes[2]] + arg_boxes, - None, descr=calldescr) - box_result = None - else: - assert False - # + opnum = rop.call_release_gil_for_descr(orig_calldescr) + assert opnum == rop.call_release_gil_for_descr(calldescr) + return self.history.record_nospec(opnum, + [c_saveall, argboxes[2]] + arg_boxes, + calldescr) # note that the result is written back to the exchange_buffer by the # following operation, which should be a raw_store - return box_result - - def direct_call_release_gil(self, argboxes, calldescr, tp): + + def direct_call_release_gil(self, argboxes, calldescr): + if not we_are_translated(): # for llgraph + calldescr._original_func_ = argboxes[0].getint() effectinfo = calldescr.get_extra_info() realfuncaddr, saveerr = effectinfo.call_release_gil_target funcbox = ConstInt(heaptracker.adr2int(realfuncaddr)) savebox = ConstInt(saveerr) - if tp == 'i': - value = executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_I, - argboxes, calldescr) - resbox = self.history.record(rop.CALL_RELEASE_GIL_I, - [savebox, funcbox] + argboxes[1:], - value, calldescr) - elif tp == 'f': - value = executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_F, - argboxes, calldescr) - resbox = self.history.record(rop.CALL_RELEASE_GIL_F, - [savebox, funcbox] + argboxes[1:], - value, calldescr) - elif tp == 'v': - executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_N, - argboxes, calldescr) - self.history.record(rop.CALL_RELEASE_GIL_N, - [savebox, funcbox] + argboxes[1:], - None, calldescr) - resbox = 
None - else: - assert False, "no CALL_RELEASE_GIL_R" - - if not we_are_translated(): # for llgraph - calldescr._original_func_ = argboxes[0].getint() - return resbox + opnum = rop.call_release_gil_for_descr(calldescr) + return self.history.record_nospec(opnum, + [savebox, funcbox] + argboxes[1:], + calldescr) def do_not_in_trace_call(self, allboxes, descr): self.clear_exception() @@ -3187,8 +3165,6 @@ """Raised after we mutated metainterp.framestack, in order to force it to reload the current top-of-stack frame that gets interpreted.""" -NOT_HANDLED = history.CONST_FALSE - # ____________________________________________________________ def _get_opimpl_method(name, argcodes): From pypy.commits at gmail.com Wed Apr 6 03:29:03 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Apr 2016 00:29:03 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-history-rewriting: ready to merge Message-ID: <5704babf.04c31c0a.74be7.ffff9b57@mx.google.com> Author: Armin Rigo Branch: cleanup-history-rewriting Changeset: r83534:ac4f212eb7cb Date: 2016-04-06 10:27 +0300 http://bitbucket.org/pypy/pypy/changeset/ac4f212eb7cb/ Log: ready to merge From pypy.commits at gmail.com Wed Apr 6 03:39:31 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Apr 2016 00:39:31 -0700 (PDT) Subject: [pypy-commit] pypy default: Randomly add this here Message-ID: <5704bd33.c7371c0a.29857.ffff9e2b@mx.google.com> Author: Armin Rigo Branch: Changeset: r83536:de13adcdf05a Date: 2016-04-06 10:39 +0300 http://bitbucket.org/pypy/pypy/changeset/de13adcdf05a/ Log: Randomly add this here diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -146,7 +146,7 @@ MODEL_X86_64: ['floats', 'singlefloats'], MODEL_X86_64_SSE4: ['floats', 'singlefloats'], MODEL_ARM: ['floats', 'singlefloats', 'longlong'], - MODEL_PPC_64: [], # we don't even have PPC directory, so no + MODEL_PPC_64: ['floats'], MODEL_S390_64: 
['floats'], }[backend_name] From pypy.commits at gmail.com Wed Apr 6 04:39:55 2016 From: pypy.commits at gmail.com (palecsandru) Date: Wed, 06 Apr 2016 01:39:55 -0700 (PDT) Subject: [pypy-commit] pypy default: (cfbolz, palecsandru): Fixed what's new that was backed out. The test did not know about the backouts. Message-ID: <5704cb5b.d7b81c0a.6028b.7c17@mx.google.com> Author: Alecsandru Patrascu Branch: Changeset: r83537:6c39efa5fa43 Date: 2016-04-06 11:35 +0300 http://bitbucket.org/pypy/pypy/changeset/6c39efa5fa43/ Log: (cfbolz, palecsandru): Fixed what's new that was backed out. The test did not know about the backouts. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -35,3 +35,4 @@ .. branch: win32-lib-name +.. branch: remove-frame-forcing-in-executioncontext From pypy.commits at gmail.com Wed Apr 6 04:45:39 2016 From: pypy.commits at gmail.com (palecsandru) Date: Wed, 06 Apr 2016 01:45:39 -0700 (PDT) Subject: [pypy-commit] pypy default: (cfbolz, palecsandru): Fix failing test that was not adapted when the implementation changed. Message-ID: <5704ccb3.c856c20a.5399f.ffff9338@mx.google.com> Author: Alecsandru Patrascu Branch: Changeset: r83538:1ceea7a807e1 Date: 2016-04-06 11:44 +0300 http://bitbucket.org/pypy/pypy/changeset/1ceea7a807e1/ Log: (cfbolz, palecsandru): Fix failing test that was not adapted when the implementation changed. 
diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -53,7 +53,7 @@ assert _promote(1) == 1 assert _promote(1.1) == 1.1 assert _promote("abc") == "abc" - assert _promote(u"abc") == u"abc" + raises(TypeError, _promote, u"abc") l = [] assert _promote(l) is l class A(object): From pypy.commits at gmail.com Wed Apr 6 08:16:34 2016 From: pypy.commits at gmail.com (Raemi) Date: Wed, 06 Apr 2016 05:16:34 -0700 (PDT) Subject: [pypy-commit] stmgc default: add mem_reset_on_abort, a mechanism to reset some memory to the value it had on Message-ID: <5704fe22.c7811c0a.18e8a.1f90@mx.google.com> Author: Remi Meier Branch: Changeset: r1985:ff3079618aaf Date: 2016-04-06 15:16 +0300 http://bitbucket.org/pypy/stmgc/changeset/ff3079618aaf/ Log: add mem_reset_on_abort, a mechanism to reset some memory to the value it had on tx start in case the tx aborts. diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1033,6 +1033,13 @@ check_nursery_at_transaction_start(); + if (tl->mem_reset_on_abort) { + assert(!!tl->mem_stored_for_reset_on_abort); + memcpy(tl->mem_stored_for_reset_on_abort, tl->mem_reset_on_abort, + tl->mem_bytes_to_reset_on_abort); + } + + /* Change read-version here, because if we do stm_validate in the safe-point below, we should not see our old reads from the last transaction. */ @@ -1432,6 +1439,9 @@ if (tl->mem_clear_on_abort) memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); + if (tl->mem_reset_on_abort) + memcpy(tl->mem_reset_on_abort, tl->mem_stored_for_reset_on_abort, + tl->mem_bytes_to_reset_on_abort); invoke_and_clear_user_callbacks(1); /* for abort */ diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -76,6 +76,11 @@ the following raw region of memory is cleared. 
*/ char *mem_clear_on_abort; size_t mem_bytes_to_clear_on_abort; + /* mechanism to reset a memory location to the value it had at the start + of the transaction in case of an abort */ + char *mem_reset_on_abort; /* addr */ + size_t mem_bytes_to_reset_on_abort; /* how many bytes */ + char *mem_stored_for_reset_on_abort; /* content at tx start */ /* the next fields are handled internally by the library */ int last_associated_segment_num; /* always a valid seg num */ int thread_local_counter; diff --git a/c8/test/support.py b/c8/test/support.py --- a/c8/test/support.py +++ b/c8/test/support.py @@ -30,6 +30,9 @@ object_t *thread_local_obj; char *mem_clear_on_abort; size_t mem_bytes_to_clear_on_abort; + char *mem_reset_on_abort; /* addr */ + size_t mem_bytes_to_reset_on_abort; /* how many bytes */ + char *mem_stored_for_reset_on_abort; /* content at tx start */ int last_associated_segment_num; struct stm_thread_local_s *prev, *next; void *creating_pthread[2]; diff --git a/c8/test/test_extra.py b/c8/test/test_extra.py --- a/c8/test/test_extra.py +++ b/c8/test/test_extra.py @@ -19,6 +19,10 @@ tl.mem_bytes_to_clear_on_abort = 2 # self.start_transaction() + self.commit_transaction() + assert ffi.string(p) == "hello" + # + self.start_transaction() assert ffi.string(p) == "hello" self.abort_transaction() assert p[0] == '\0' @@ -27,6 +31,27 @@ assert p[3] == 'l' assert p[4] == 'o' + def test_reset_on_abort(self): + p = ffi.new("char[]", "hello") + tl = self.get_stm_thread_local() + assert tl.mem_reset_on_abort == ffi.NULL + tl.mem_reset_on_abort = p + tl.mem_bytes_to_reset_on_abort = 2 + tl.mem_stored_for_reset_on_abort = ffi.new("char[5]") + # + self.start_transaction() + assert ffi.string(p) == "hello" + p[0] = 'w' + self.commit_transaction() + assert ffi.string(p) == "wello" + # + self.start_transaction() + assert ffi.string(p) == "wello" + p[1] = 'a' + p[4] = 'i' + self.abort_transaction() + assert ffi.string(p) == "welli" + def test_call_on_abort(self): p0 = 
ffi_new_aligned("aaa") p1 = ffi_new_aligned("hello") From pypy.commits at gmail.com Wed Apr 6 08:24:34 2016 From: pypy.commits at gmail.com (florinpapa) Date: Wed, 06 Apr 2016 05:24:34 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: (florin, antocuni) Add resource warning test for file descriptors Message-ID: <57050002.e853c20a.43df4.fffffb53@mx.google.com> Author: florinpapa Branch: resource_warning Changeset: r83539:d09f1ccc1094 Date: 2016-04-06 15:08 +0300 http://bitbucket.org/pypy/pypy/changeset/d09f1ccc1094/ Log: (florin, antocuni) Add resource warning test for file descriptors diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1740,6 +1740,13 @@ _warnings.warn(msg, warningcls, stacklevel=stacklevel) """) + def resource_warning(self, msg): + w_msg = self.wrap(msg) + self.appexec([w_msg], + """(msg): + import sys + print >> sys.stderr, msg + """) class AppExecCache(SpaceCache): def build(cache, source): diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -49,6 +49,10 @@ # thread that runs __del__, so no race condition should be possible self.clear_all_weakrefs() if self.stream is not None: + if self.space.sys.resource_warning_enabled: + w_repr = self.space.repr(self) + str_repr = self.space.str_w(w_repr) + self.space.resource_warning("WARNING: unclosed file: " + str_repr) self.enqueue_for_destruction(self.space, W_File.destructor, 'close() method of ') diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -254,6 +254,31 @@ if '__pypy__' in sys.builtin_module_names: assert repr(self.temppath) in g.getvalue() + def test_resource_warning(self): + import os, gc, sys, cStringIO + if '__pypy__' not in 
sys.builtin_module_names: + skip("pypy specific test") + def fn(): + f = self.file(self.temppath, 'w') + g = cStringIO.StringIO() + preverr = sys.stderr + try: + sys.stderr = g + del f + gc.collect() # force __del__ to be called + finally: + sys.stderr = preverr + return g.getvalue() + + try: + sys.pypy_set_resource_warning(False) + assert fn() == "" + sys.pypy_set_resource_warning(True) + msg = fn() + assert msg.startswith("WARNING: unclosed file: Author: Ronan Lamy Branch: Changeset: r83541:f5f369271c0c Date: 2016-04-06 16:02 +0100 http://bitbucket.org/pypy/pypy/changeset/f5f369271c0c/ Log: Update whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -36,3 +36,7 @@ .. branch: win32-lib-name .. branch: remove-frame-forcing-in-executioncontext + +.. branch: rposix-for-3 + +Wrap more POSIX functions in `rpython.rlib.rposix`. From pypy.commits at gmail.com Wed Apr 6 11:03:08 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 06 Apr 2016 08:03:08 -0700 (PDT) Subject: [pypy-commit] pypy default: Merge branch 'rposix-for-3' Message-ID: <5705252c.6718c20a.eeb2e.43c2@mx.google.com> Author: Ronan Lamy Branch: Changeset: r83540:70a0c992daf3 Date: 2016-04-06 15:58 +0100 http://bitbucket.org/pypy/pypy/changeset/70a0c992daf3/ Log: Merge branch 'rposix-for-3' diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -22,21 +22,6 @@ from rpython.rlib import rwin32 from rpython.rlib.rwin32file import make_win32_traits -class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes=['sys/stat.h', - 'unistd.h', - 'fcntl.h'], - ) - for _name in """fchdir fchmod fchmodat fchown fchownat fexecve fdopendir - fpathconf fstat fstatat fstatvfs ftruncate futimens futimes - futimesat linkat lchflags lchmod lchown lstat lutimes - mkdirat mkfifoat mknodat openat readlinkat renameat - symlinkat unlinkat 
utimensat""".split(): - locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name) -cConfig = rffi_platform.configure(CConfig) -globals().update(cConfig) - class CConstantErrno(CConstant): # these accessors are used when calling get_errno() or set_errno() @@ -618,14 +603,44 @@ config = rffi_platform.configure(CConfig) DIRENT = config['DIRENT'] DIRENTP = lltype.Ptr(DIRENT) - c_opendir = external('opendir', [rffi.CCHARP], DIRP, - save_err=rffi.RFFI_SAVE_ERRNO) + c_opendir = external('opendir', + [rffi.CCHARP], DIRP, save_err=rffi.RFFI_SAVE_ERRNO) + c_fdopendir = external('fdopendir', + [rffi.INT], DIRP, save_err=rffi.RFFI_SAVE_ERRNO) # XXX macro=True is hack to make sure we get the correct kind of # dirent struct (which depends on defines) c_readdir = external('readdir', [DIRP], DIRENTP, macro=True, save_err=rffi.RFFI_FULL_ERRNO_ZERO) c_closedir = external('closedir', [DIRP], rffi.INT) +def _listdir(dirp): + result = [] + while True: + direntp = c_readdir(dirp) + if not direntp: + error = get_saved_errno() + break + namep = rffi.cast(rffi.CCHARP, direntp.c_d_name) + name = rffi.charp2str(namep) + if name != '.' and name != '..': + result.append(name) + c_closedir(dirp) + if error: + raise OSError(error, "readdir failed") + return result + +def fdlistdir(dirfd): + """ + Like listdir(), except that the directory is specified as an open + file descriptor. + + Note: fdlistdir() closes the file descriptor. + """ + dirp = c_fdopendir(dirfd) + if not dirp: + raise OSError(get_saved_errno(), "opendir failed") + return _listdir(dirp) + @replace_os_function('listdir') @specialize.argtype(0) def listdir(path): @@ -634,20 +649,7 @@ dirp = c_opendir(path) if not dirp: raise OSError(get_saved_errno(), "opendir failed") - result = [] - while True: - direntp = c_readdir(dirp) - if not direntp: - error = get_saved_errno() - break - namep = rffi.cast(rffi.CCHARP, direntp.c_d_name) - name = rffi.charp2str(namep) - if name != '.' 
and name != '..': - result.append(name) - c_closedir(dirp) - if error: - raise OSError(error, "readdir failed") - return result + return _listdir(dirp) else: # _WIN32 case traits = _preferred_traits(path) win32traits = make_win32_traits(traits) @@ -1739,3 +1741,259 @@ def getcontroller(self): from rpython.rlib.rposix_environ import OsEnvironController return OsEnvironController() + + +# ____________________________________________________________ +# Support for f... and ...at families of POSIX functions + +class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes=['sys/stat.h', + 'unistd.h', + 'fcntl.h'], + ) + for _name in """faccessat fchdir fchmod fchmodat fchown fchownat fexecve + fdopendir fpathconf fstat fstatat fstatvfs ftruncate + futimens futimes futimesat linkat chflags lchflags lchmod lchown + lstat lutimes mkdirat mkfifoat mknodat openat readlinkat renameat + symlinkat unlinkat utimensat""".split(): + locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name) +cConfig = rffi_platform.configure(CConfig) +globals().update(cConfig) + +if not _WIN32: + class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes=['sys/stat.h', + 'unistd.h', + 'fcntl.h'], + ) + AT_FDCWD = rffi_platform.DefinedConstantInteger('AT_FDCWD') + AT_SYMLINK_NOFOLLOW = rffi_platform.DefinedConstantInteger('AT_SYMLINK_NOFOLLOW') + AT_EACCESS = rffi_platform.DefinedConstantInteger('AT_EACCESS') + AT_REMOVEDIR = rffi_platform.DefinedConstantInteger('AT_REMOVEDIR') + AT_EMPTY_PATH = rffi_platform.DefinedConstantInteger('AT_EMPTY_PATH') + UTIME_NOW = rffi_platform.DefinedConstantInteger('UTIME_NOW') + UTIME_OMIT = rffi_platform.DefinedConstantInteger('UTIME_OMIT') + TIMESPEC = rffi_platform.Struct('struct timespec', [ + ('tv_sec', rffi.TIME_T), + ('tv_nsec', rffi.LONG)]) + + cConfig = rffi_platform.configure(CConfig) + globals().update(cConfig) + TIMESPEC2P = rffi.CArrayPtr(TIMESPEC) + +if HAVE_FACCESSAT: + c_faccessat = external('faccessat', + [rffi.INT, 
rffi.CCHARP, rffi.INT, rffi.INT], rffi.INT) + + def faccessat(pathname, mode, dir_fd=AT_FDCWD, + effective_ids=False, follow_symlinks=True): + """Thin wrapper around faccessat(2) with an interface simlar to + Python3's os.access(). + """ + flags = 0 + if not follow_symlinks: + flags |= AT_SYMLINK_NOFOLLOW + if effective_ids: + flags |= AT_EACCESS + error = c_faccessat(dir_fd, pathname, mode, flags) + return error == 0 + +if HAVE_FCHMODAT: + c_fchmodat = external('fchmodat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO,) + + def fchmodat(path, mode, dir_fd=AT_FDCWD, follow_symlinks=True): + if follow_symlinks: + flag = 0 + else: + flag = AT_SYMLINK_NOFOLLOW + error = c_fchmodat(dir_fd, path, mode, flag) + handle_posix_error('fchmodat', error) + +if HAVE_FCHOWNAT: + c_fchownat = external('fchownat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO,) + + def fchownat(path, owner, group, dir_fd=AT_FDCWD, + follow_symlinks=True, empty_path=False): + flag = 0 + if not follow_symlinks: + flag |= AT_SYMLINK_NOFOLLOW + if empty_path: + flag |= AT_EMPTY_PATH + error = c_fchownat(dir_fd, path, owner, group, flag) + handle_posix_error('fchownat', error) + +if HAVE_FEXECVE: + c_fexecve = external('fexecve', + [rffi.INT, rffi.CCHARPP, rffi.CCHARPP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def fexecve(fd, args, env): + envstrs = [] + for item in env.iteritems(): + envstr = "%s=%s" % item + envstrs.append(envstr) + + # This list conversion already takes care of NUL bytes. 
+ l_args = rffi.ll_liststr2charpp(args) + l_env = rffi.ll_liststr2charpp(envstrs) + c_fexecve(fd, l_args, l_env) + + rffi.free_charpp(l_env) + rffi.free_charpp(l_args) + raise OSError(get_saved_errno(), "execve failed") + +if HAVE_LINKAT: + c_linkat = external('linkat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT) + + def linkat(src, dst, src_dir_fd=AT_FDCWD, dst_dir_fd=AT_FDCWD, + follow_symlinks=True): + """Thin wrapper around linkat(2) with an interface similar to + Python3's os.link() + """ + if follow_symlinks: + flag = 0 + else: + flag = AT_SYMLINK_NOFOLLOW + error = c_linkat(src_dir_fd, src, dst_dir_fd, dst, flag) + handle_posix_error('linkat', error) + +if HAVE_FUTIMENS: + c_futimens = external('futimens', [rffi.INT, TIMESPEC2P], rffi.INT) + + def futimens(fd, atime, atime_ns, mtime, mtime_ns): + l_times = lltype.malloc(TIMESPEC2P.TO, 2, flavor='raw') + rffi.setintfield(l_times[0], 'c_tv_sec', atime) + rffi.setintfield(l_times[0], 'c_tv_nsec', atime_ns) + rffi.setintfield(l_times[1], 'c_tv_sec', mtime) + rffi.setintfield(l_times[1], 'c_tv_nsec', mtime_ns) + error = c_futimens(fd, l_times) + lltype.free(l_times, flavor='raw') + handle_posix_error('futimens', error) + +if HAVE_UTIMENSAT: + c_utimensat = external('utimensat', + [rffi.INT, rffi.CCHARP, TIMESPEC2P, rffi.INT], rffi.INT) + + def utimensat(pathname, atime, atime_ns, mtime, mtime_ns, + dir_fd=AT_FDCWD, follow_symlinks=True): + """Wrapper around utimensat(2) + + To set access time to the current time, pass atime_ns=UTIME_NOW, + atime is then ignored. + + To set modification time to the current time, pass mtime_ns=UTIME_NOW, + mtime is then ignored. 
+ """ + l_times = lltype.malloc(TIMESPEC2P.TO, 2, flavor='raw') + rffi.setintfield(l_times[0], 'c_tv_sec', atime) + rffi.setintfield(l_times[0], 'c_tv_nsec', atime_ns) + rffi.setintfield(l_times[1], 'c_tv_sec', mtime) + rffi.setintfield(l_times[1], 'c_tv_nsec', mtime_ns) + if follow_symlinks: + flag = 0 + else: + flag = AT_SYMLINK_NOFOLLOW + error = c_utimensat(dir_fd, pathname, l_times, flag) + lltype.free(l_times, flavor='raw') + handle_posix_error('utimensat', error) + +if HAVE_MKDIRAT: + c_mkdirat = external('mkdirat', + [rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def mkdirat(pathname, mode, dir_fd=AT_FDCWD): + error = c_mkdirat(dir_fd, pathname, mode) + handle_posix_error('mkdirat', error) + +if HAVE_UNLINKAT: + c_unlinkat = external('unlinkat', + [rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def unlinkat(pathname, dir_fd=AT_FDCWD, removedir=False): + flag = AT_REMOVEDIR if removedir else 0 + error = c_unlinkat(dir_fd, pathname, flag) + handle_posix_error('unlinkat', error) + +if HAVE_READLINKAT: + c_readlinkat = external( + 'readlinkat', + [rffi.INT, rffi.CCHARP, rffi.CCHARP, rffi.SIZE_T], rffi.SSIZE_T, + save_err=rffi.RFFI_SAVE_ERRNO) + + def readlinkat(pathname, dir_fd=AT_FDCWD): + pathname = _as_bytes0(pathname) + bufsize = 1023 + while True: + buf = lltype.malloc(rffi.CCHARP.TO, bufsize, flavor='raw') + res = widen(c_readlinkat(dir_fd, pathname, buf, bufsize)) + if res < 0: + lltype.free(buf, flavor='raw') + error = get_saved_errno() # failed + raise OSError(error, "readlinkat failed") + elif res < bufsize: + break # ok + else: + # buf too small, try again with a larger buffer + lltype.free(buf, flavor='raw') + bufsize *= 4 + # convert the result to a string + result = rffi.charp2strn(buf, res) + lltype.free(buf, flavor='raw') + return result + +if HAVE_RENAMEAT: + c_renameat = external( + 'renameat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP], rffi.INT, + 
save_err=rffi.RFFI_SAVE_ERRNO) + + def renameat(src, dst, src_dir_fd=AT_FDCWD, dst_dir_fd=AT_FDCWD): + error = c_renameat(src_dir_fd, src, dst_dir_fd, dst) + handle_posix_error('renameat', error) + + +if HAVE_SYMLINKAT: + c_symlinkat = external('symlinkat', + [rffi.CCHARP, rffi.INT, rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def symlinkat(src, dst, dir_fd=AT_FDCWD): + error = c_symlinkat(src, dir_fd, dst) + handle_posix_error('symlinkat', error) + +if HAVE_OPENAT: + c_openat = external('openat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + @enforceargs(s_Str0, int, int, int, typecheck=False) + def openat(path, flags, mode, dir_fd=AT_FDCWD): + fd = c_openat(dir_fd, path, flags, mode) + return handle_posix_error('open', fd) + +if HAVE_MKFIFOAT: + c_mkfifoat = external('mkfifoat', + [rffi.INT, rffi.CCHARP, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def mkfifoat(path, mode, dir_fd=AT_FDCWD): + error = c_mkfifoat(dir_fd, path, mode) + handle_posix_error('mkfifoat', error) + +if HAVE_MKNODAT: + c_mknodat = external('mknodat', + [rffi.INT, rffi.CCHARP, rffi.MODE_T, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def mknodat(path, mode, device, dir_fd=AT_FDCWD): + error = c_mknodat(dir_fd, path, mode, device) + handle_posix_error('mknodat', error) diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -7,6 +7,12 @@ import errno import py +def rposix_requires(funcname): + return py.test.mark.skipif(not hasattr(rposix, funcname), + reason="Requires rposix.%s()" % funcname) + +win_only = py.test.mark.skipif("os.name != 'nt'") + class TestPosixFunction: def test_access(self): filename = str(udir.join('test_access.txt')) @@ -29,9 +35,8 @@ for value in times: assert isinstance(value, float) + @py.test.mark.skipif("not hasattr(os, 'getlogin')") def test_getlogin(self): - if not 
hasattr(os, 'getlogin'): - py.test.skip('posix specific function') try: expected = os.getlogin() except OSError, e: @@ -39,9 +44,8 @@ data = rposix.getlogin() assert data == expected + @win_only def test_utimes(self): - if os.name != 'nt': - py.test.skip('Windows specific feature') # Windows support centiseconds def f(fname, t1): os.utime(fname, (t1, t1)) @@ -51,15 +55,12 @@ t1 = 1159195039.25 compile(f, (str, float))(str(fname), t1) assert t1 == os.stat(str(fname)).st_mtime - if sys.version_info < (2, 7): - py.test.skip('requires Python 2.7') t1 = 5000000000.0 compile(f, (str, float))(str(fname), t1) assert t1 == os.stat(str(fname)).st_mtime + @win_only def test__getfullpathname(self): - if os.name != 'nt': - py.test.skip('nt specific function') posix = __import__(os.name) sysdrv = os.getenv('SystemDrive', 'C:') stuff = sysdrv + 'stuff' @@ -99,11 +100,25 @@ def test_mkdir(self): filename = str(udir.join('test_mkdir.dir')) rposix.mkdir(filename, 0) - exc = py.test.raises(OSError, rposix.mkdir, filename, 0) - assert exc.value.errno == errno.EEXIST + with py.test.raises(OSError) as excinfo: + rposix.mkdir(filename, 0) + assert excinfo.value.errno == errno.EEXIST if sys.platform == 'win32': assert exc.type is WindowsError + @rposix_requires('mkdirat') + def test_mkdirat(self): + relpath = 'test_mkdirat.dir' + filename = str(udir.join(relpath)) + dirfd = os.open(os.path.dirname(filename), os.O_RDONLY) + try: + rposix.mkdirat(relpath, 0, dir_fd=dirfd) + with py.test.raises(OSError) as excinfo: + rposix.mkdirat(relpath, 0, dir_fd=dirfd) + assert excinfo.value.errno == errno.EEXIST + finally: + os.close(dirfd) + def test_strerror(self): assert rposix.strerror(2) == os.strerror(2) @@ -116,10 +131,8 @@ os.unlink(filename) + @py.test.mark.skipif("os.name != 'posix'") def test_execve(self): - if os.name != 'posix': - py.test.skip('posix specific function') - EXECVE_ENV = {"foo": "bar", "baz": "quux"} def run_execve(program, args=None, env=None, do_path_lookup=False): @@ 
-258,11 +271,8 @@ assert rposix.isatty(-1) is False + at py.test.mark.skipif("not hasattr(os, 'ttyname')") class TestOsExpect(ExpectTest): - def setup_class(cls): - if not hasattr(os, 'ttyname'): - py.test.skip("no ttyname") - def test_ttyname(self): def f(): import os @@ -426,9 +436,8 @@ except Exception: pass + @win_only def test_is_valid_fd(self): - if os.name != 'nt': - py.test.skip('relevant for windows only') assert rposix.is_valid_fd(0) == 1 fid = open(str(udir.join('validate_test.txt')), 'w') fd = fid.fileno() @@ -448,6 +457,59 @@ def _get_filename(self): return str(udir.join('test_open_ascii')) + @rposix_requires('openat') + def test_openat(self): + def f(dirfd): + try: + fd = rposix.openat('test_open_ascii', os.O_RDONLY, 0777, dirfd) + try: + text = os.read(fd, 50) + return text + finally: + os.close(fd) + except OSError: + return '' + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + assert ll_to_string(interpret(f, [dirfd])) == "test" + finally: + os.close(dirfd) + + @rposix_requires('unlinkat') + def test_unlinkat(self): + def f(dirfd): + return rposix.unlinkat('test_open_ascii', dir_fd=dirfd) + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + interpret(f, [dirfd]) + finally: + os.close(dirfd) + assert not os.path.exists(self.ufilename) + + def test_utimensat(self): + def f(dirfd): + return rposix.utimensat('test_open_ascii', + 0, rposix.UTIME_NOW, 0, rposix.UTIME_NOW, dir_fd=dirfd) + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + interpret(f, [dirfd]) # does not crash + finally: + os.close(dirfd) + + def test_fchmodat(self): + def f(dirfd): + return rposix.fchmodat('test_open_ascii', 0777, dirfd) + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + interpret(f, [dirfd]) # does not crash + finally: + os.close(dirfd) + + class TestPosixUnicode(BasePosixUnicodeOrAscii): def _get_filename(self): return (unicode(udir.join('test_open')) + @@ -465,3 +527,30 @@ 
os.open('/tmp/t', 0, 0) os.open(u'/tmp/t', 0, 0) compile(f, ()) + + +def test_fdlistdir(tmpdir): + tmpdir.join('file').write('text') + dirfd = os.open(str(tmpdir), os.O_RDONLY) + result = rposix.fdlistdir(dirfd) + # Note: fdlistdir() always closes dirfd + assert result == ['file'] + +def test_symlinkat(tmpdir): + tmpdir.join('file').write('text') + dirfd = os.open(str(tmpdir), os.O_RDONLY) + try: + rposix.symlinkat('file', 'link', dir_fd=dirfd) + assert os.readlink(str(tmpdir.join('link'))) == 'file' + finally: + os.close(dirfd) + +def test_renameat(tmpdir): + tmpdir.join('file').write('text') + dirfd = os.open(str(tmpdir), os.O_RDONLY) + try: + rposix.renameat('file', 'file2', src_dir_fd=dirfd, dst_dir_fd=dirfd) + finally: + os.close(dirfd) + assert tmpdir.join('file').check(exists=False) + assert tmpdir.join('file2').check(exists=True) From pypy.commits at gmail.com Wed Apr 6 11:07:02 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 06 Apr 2016 08:07:02 -0700 (PDT) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <57052616.ca941c0a.9c030.3fb4@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83542:c0fce2e85c79 Date: 2016-04-06 16:06 +0100 http://bitbucket.org/pypy/pypy/changeset/c0fce2e85c79/ Log: hg merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -35,3 +35,8 @@ .. branch: win32-lib-name +.. branch: remove-frame-forcing-in-executioncontext + +.. branch: rposix-for-3 + +Wrap more POSIX functions in `rpython.rlib.rposix`. 
diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -57,7 +57,7 @@ assert _promote(1) == 1 assert _promote(1.1) == 1.1 assert _promote("abc") == "abc" - assert _promote(u"abc") == u"abc" + raises(TypeError, _promote, u"abc") l = [] assert _promote(l) is l class A(object): diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -14,7 +14,7 @@ tmpfile2 = open(self.tmpfilename2, 'wb') tmpfileno2 = tmpfile2.fileno() - import struct, sys + import struct, sys, gc WORD = struct.calcsize('l') @@ -46,6 +46,8 @@ return count import _vmprof + gc.collect() # try to make the weakref list deterministic + gc.collect() # by freeing all dead code objects _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() @@ -60,6 +62,8 @@ pass """, d) + gc.collect() + gc.collect() _vmprof.enable(tmpfileno2, 0.01) exec_("""def foo2(): diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -146,7 +146,7 @@ MODEL_X86_64: ['floats', 'singlefloats'], MODEL_X86_64_SSE4: ['floats', 'singlefloats'], MODEL_ARM: ['floats', 'singlefloats', 'longlong'], - MODEL_PPC_64: [], # we don't even have PPC directory, so no + MODEL_PPC_64: ['floats'], MODEL_S390_64: ['floats'], }[backend_name] diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1592,43 +1592,70 @@ resbox = self._do_jit_force_virtual(allboxes, descr, pc) if resbox is not None: return resbox + + # 1. preparation self.metainterp.vable_and_vrefs_before_residual_call() + + # 2. 
actually do the call now (we'll have cases later): the + # result is stored into 'c_result' for now, which is a Const + metainterp = self.metainterp tp = descr.get_normalized_result_type() - resbox = NOT_HANDLED - opnum = -1 - if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: - opnum = rop.call_may_force_for_descr(descr) - resbox = self.metainterp.direct_libffi_call(allboxes, descr, - tp) - if resbox is NOT_HANDLED: - if effectinfo.is_call_release_gil(): - opnum = rop.call_release_gil_for_descr(descr) - resbox = self.metainterp.direct_call_release_gil(allboxes, - descr, tp) - elif tp == 'i': - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_I, allboxes, descr=descr) - elif tp == 'r': - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_R, allboxes, descr=descr) - elif tp == 'f': - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_F, allboxes, descr=descr) - elif tp == 'v': - self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_N, allboxes, descr=descr) - resbox = None - else: - assert False - if opnum == -1: - opnum = rop.call_may_force_for_descr(descr) - cut_pos = self.metainterp.vrefs_after_residual_call( - self.metainterp._last_op, opnum, allboxes, descr, cut_pos) - vablebox = None + if tp == 'i': + opnum1 = rop.CALL_MAY_FORCE_I + value = executor.execute_varargs(metainterp.cpu, metainterp, + opnum1, allboxes, descr) + c_result = ConstInt(value) + elif tp == 'r': + opnum1 = rop.CALL_MAY_FORCE_R + value = executor.execute_varargs(metainterp.cpu, metainterp, + opnum1, allboxes, descr) + c_result = ConstPtr(value) + elif tp == 'f': + opnum1 = rop.CALL_MAY_FORCE_F + value = executor.execute_varargs(metainterp.cpu, metainterp, + opnum1, allboxes, descr) + c_result = ConstFloat(value) + elif tp == 'v': + opnum1 = rop.CALL_MAY_FORCE_N + executor.execute_varargs(metainterp.cpu, metainterp, + opnum1, allboxes, descr) + c_result = None + else: + assert False + + # 3. 
after this call, check the vrefs. If any have been + # forced by the call, then we record in the trace a + # VIRTUAL_REF_FINISH---before we record any CALL + self.metainterp.vrefs_after_residual_call() + + # 4. figure out what kind of CALL we need to record + # from the effectinfo and the 'assembler_call' flag if assembler_call: vablebox, resbox = self.metainterp.direct_assembler_call( - self.metainterp._last_op, allboxes, descr, assembler_call_jd, cut_pos) - if resbox and resbox.type != 'v': + allboxes, descr, assembler_call_jd) + else: + vablebox = None + resbox = None + if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: + resbox = self.metainterp.direct_libffi_call(allboxes, descr) + # ^^^ may return None to mean "can't handle it myself" + if resbox is None: + if effectinfo.is_call_release_gil(): + resbox = self.metainterp.direct_call_release_gil( + allboxes, descr) + else: + resbox = self.metainterp.direct_call_may_force( + allboxes, descr) + + # 5. invalidate the heapcache based on the CALL_MAY_FORCE + # operation executed above in step 2 + self.metainterp.heapcache.invalidate_caches(opnum1, descr, allboxes) + + # 6. 
put 'c_result' back into the recorded operation + if resbox.type == 'v': + resbox = None # for void calls, must return None below + else: + resbox.copy_value_from(c_result) self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call(funcbox) self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) @@ -2170,7 +2197,6 @@ profiler.count_ops(opnum, Counters.RECORDED_OPS) self.heapcache.invalidate_caches(opnum, descr, argboxes) op = self.history.record(opnum, argboxes, resvalue, descr) - self._last_op = op self.attach_debug_info(op) if op.type != 'v': return op @@ -2781,7 +2807,7 @@ force_token], None, descr=vinfo.vable_token_descr) - def vrefs_after_residual_call(self, op, opnum, arglist, descr, cut_pos): + def vrefs_after_residual_call(self): vrefinfo = self.staticdata.virtualref_info for i in range(0, len(self.virtualref_boxes), 2): vrefbox = self.virtualref_boxes[i+1] @@ -2791,9 +2817,7 @@ # during this CALL_MAY_FORCE. Mark this fact by # generating a VIRTUAL_REF_FINISH on it and replacing # it by ConstPtr(NULL). - cut_pos = self.stop_tracking_virtualref(i, op, opnum, arglist, - descr, cut_pos) - return cut_pos + self.stop_tracking_virtualref(i) def vable_after_residual_call(self, funcbox): vinfo = self.jitdriver_sd.virtualizable_info @@ -2817,19 +2841,14 @@ # have the eventual exception raised (this is normally done # after the call to vable_after_residual_call()). 
- def stop_tracking_virtualref(self, i, op, opnum, arglist, descr, cut_pos): + def stop_tracking_virtualref(self, i): virtualbox = self.virtualref_boxes[i] vrefbox = self.virtualref_boxes[i+1] - # record VIRTUAL_REF_FINISH just before the current CALL_MAY_FORCE - self.history.cut(cut_pos) # pop the CALL - self.history.record_nospec(rop.VIRTUAL_REF_FINISH, - [vrefbox, virtualbox], None) - cut_pos = self.history.get_trace_position() - newop = self.history.record_nospec(opnum, arglist, descr) - op.set_position(newop.get_position()) - # mark by replacing it with ConstPtr(NULL) + # record VIRTUAL_REF_FINISH here, which is before the actual + # CALL_xxx is recorded + self.history.record(rop.VIRTUAL_REF_FINISH, [vrefbox, virtualbox], None) + # mark this situation by replacing the vrefbox with ConstPtr(NULL) self.virtualref_boxes[i+1] = self.cpu.ts.CONST_NULL - return cut_pos def handle_possible_exception(self): if self.last_exc_value: @@ -3026,24 +3045,26 @@ newop.copy_value_from(op) return newop - def direct_assembler_call(self, op, arglist, descr, targetjitdriver_sd, cut_pos): - """ Generate a direct call to assembler for portal entry point, - patching the CALL_MAY_FORCE that occurred just now. + def direct_call_may_force(self, argboxes, calldescr): + """ Common case: record in the history a CALL_MAY_FORCE with + 'c_result' as the result of that call. (The actual call has + already been done.) """ - self.history.cut(cut_pos) + opnum = rop.call_may_force_for_descr(calldescr) + return self.history.record_nospec(opnum, argboxes, calldescr) + + def direct_assembler_call(self, arglist, calldescr, targetjitdriver_sd): + """ Record in the history a direct call to assembler for portal + entry point. 
+ """ num_green_args = targetjitdriver_sd.num_green_args greenargs = arglist[1:num_green_args+1] args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args warmrunnerstate = targetjitdriver_sd.warmstate token = warmrunnerstate.get_assembler_token(greenargs) - opnum = OpHelpers.call_assembler_for_descr(descr) - oldop = op + opnum = OpHelpers.call_assembler_for_descr(calldescr) op = self.history.record_nospec(opnum, args, descr=token) - if opnum == rop.CALL_ASSEMBLER_N: - op = None - else: - op.copy_value_from(oldop) # # To fix an obscure issue, make sure the vable stays alive # longer than the CALL_ASSEMBLER operation. We do it by @@ -3054,7 +3075,7 @@ else: return None, op - def direct_libffi_call(self, argboxes, orig_calldescr, tp): + def direct_libffi_call(self, argboxes, orig_calldescr): """Generate a direct call to C code using jit_ffi_call() """ # an 'assert' that constant-folds away the rest of this function @@ -3067,7 +3088,7 @@ # box_cif_description = argboxes[1] if not isinstance(box_cif_description, ConstInt): - return NOT_HANDLED + return None # cannot be handled by direct_libffi_call() cif_description = box_cif_description.getint() cif_description = llmemory.cast_int_to_adr(cif_description) cif_description = llmemory.cast_adr_to_ptr(cif_description, @@ -3075,7 +3096,7 @@ extrainfo = orig_calldescr.get_extra_info() calldescr = self.cpu.calldescrof_dynamic(cif_description, extrainfo) if calldescr is None: - return NOT_HANDLED + return None # cannot be handled by direct_libffi_call() # box_exchange_buffer = argboxes[3] arg_boxes = [] @@ -3106,68 +3127,25 @@ # (that is, errno and SetLastError/GetLastError on Windows) # Note these flags match the ones in clibffi.ll_callback c_saveall = ConstInt(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) - if tp == 'i': - value = executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_I, - argboxes, orig_calldescr) - box_result = self.history.record( - rop.CALL_RELEASE_GIL_I, [c_saveall, 
argboxes[2]] + arg_boxes, - value, descr=calldescr) - elif tp == 'f': - value = executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_F, - argboxes, orig_calldescr) - box_result = self.history.record( - rop.CALL_RELEASE_GIL_F, [c_saveall, argboxes[2]] + arg_boxes, - value, descr=calldescr) - elif tp == 'v': - executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_N, - argboxes, orig_calldescr) - self.history.record( - rop.CALL_RELEASE_GIL_N, [c_saveall, argboxes[2]] + arg_boxes, - None, descr=calldescr) - box_result = None - else: - assert False - # + opnum = rop.call_release_gil_for_descr(orig_calldescr) + assert opnum == rop.call_release_gil_for_descr(calldescr) + return self.history.record_nospec(opnum, + [c_saveall, argboxes[2]] + arg_boxes, + calldescr) # note that the result is written back to the exchange_buffer by the # following operation, which should be a raw_store - return box_result - - def direct_call_release_gil(self, argboxes, calldescr, tp): + + def direct_call_release_gil(self, argboxes, calldescr): + if not we_are_translated(): # for llgraph + calldescr._original_func_ = argboxes[0].getint() effectinfo = calldescr.get_extra_info() realfuncaddr, saveerr = effectinfo.call_release_gil_target funcbox = ConstInt(heaptracker.adr2int(realfuncaddr)) savebox = ConstInt(saveerr) - if tp == 'i': - value = executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_I, - argboxes, calldescr) - resbox = self.history.record(rop.CALL_RELEASE_GIL_I, - [savebox, funcbox] + argboxes[1:], - value, calldescr) - elif tp == 'f': - value = executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_F, - argboxes, calldescr) - resbox = self.history.record(rop.CALL_RELEASE_GIL_F, - [savebox, funcbox] + argboxes[1:], - value, calldescr) - elif tp == 'v': - executor.execute_varargs(self.cpu, self, - rop.CALL_MAY_FORCE_N, - argboxes, calldescr) - self.history.record(rop.CALL_RELEASE_GIL_N, - [savebox, funcbox] + argboxes[1:], - None, calldescr) - resbox = 
None - else: - assert False, "no CALL_RELEASE_GIL_R" - - if not we_are_translated(): # for llgraph - calldescr._original_func_ = argboxes[0].getint() - return resbox + opnum = rop.call_release_gil_for_descr(calldescr) + return self.history.record_nospec(opnum, + [savebox, funcbox] + argboxes[1:], + calldescr) def do_not_in_trace_call(self, allboxes, descr): self.clear_exception() @@ -3187,8 +3165,6 @@ """Raised after we mutated metainterp.framestack, in order to force it to reload the current top-of-stack frame that gets interpreted.""" -NOT_HANDLED = history.CONST_FALSE - # ____________________________________________________________ def _get_opimpl_method(name, argcodes): diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -22,21 +22,6 @@ from rpython.rlib import rwin32 from rpython.rlib.rwin32file import make_win32_traits -class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes=['sys/stat.h', - 'unistd.h', - 'fcntl.h'], - ) - for _name in """fchdir fchmod fchmodat fchown fchownat fexecve fdopendir - fpathconf fstat fstatat fstatvfs ftruncate futimens futimes - futimesat linkat lchflags lchmod lchown lstat lutimes - mkdirat mkfifoat mknodat openat readlinkat renameat - symlinkat unlinkat utimensat""".split(): - locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name) -cConfig = rffi_platform.configure(CConfig) -globals().update(cConfig) - class CConstantErrno(CConstant): # these accessors are used when calling get_errno() or set_errno() @@ -618,14 +603,44 @@ config = rffi_platform.configure(CConfig) DIRENT = config['DIRENT'] DIRENTP = lltype.Ptr(DIRENT) - c_opendir = external('opendir', [rffi.CCHARP], DIRP, - save_err=rffi.RFFI_SAVE_ERRNO) + c_opendir = external('opendir', + [rffi.CCHARP], DIRP, save_err=rffi.RFFI_SAVE_ERRNO) + c_fdopendir = external('fdopendir', + [rffi.INT], DIRP, save_err=rffi.RFFI_SAVE_ERRNO) # XXX macro=True is hack to make sure we get the correct 
kind of # dirent struct (which depends on defines) c_readdir = external('readdir', [DIRP], DIRENTP, macro=True, save_err=rffi.RFFI_FULL_ERRNO_ZERO) c_closedir = external('closedir', [DIRP], rffi.INT) +def _listdir(dirp): + result = [] + while True: + direntp = c_readdir(dirp) + if not direntp: + error = get_saved_errno() + break + namep = rffi.cast(rffi.CCHARP, direntp.c_d_name) + name = rffi.charp2str(namep) + if name != '.' and name != '..': + result.append(name) + c_closedir(dirp) + if error: + raise OSError(error, "readdir failed") + return result + +def fdlistdir(dirfd): + """ + Like listdir(), except that the directory is specified as an open + file descriptor. + + Note: fdlistdir() closes the file descriptor. + """ + dirp = c_fdopendir(dirfd) + if not dirp: + raise OSError(get_saved_errno(), "opendir failed") + return _listdir(dirp) + @replace_os_function('listdir') @specialize.argtype(0) def listdir(path): @@ -634,20 +649,7 @@ dirp = c_opendir(path) if not dirp: raise OSError(get_saved_errno(), "opendir failed") - result = [] - while True: - direntp = c_readdir(dirp) - if not direntp: - error = get_saved_errno() - break - namep = rffi.cast(rffi.CCHARP, direntp.c_d_name) - name = rffi.charp2str(namep) - if name != '.' and name != '..': - result.append(name) - c_closedir(dirp) - if error: - raise OSError(error, "readdir failed") - return result + return _listdir(dirp) else: # _WIN32 case traits = _preferred_traits(path) win32traits = make_win32_traits(traits) @@ -1739,3 +1741,259 @@ def getcontroller(self): from rpython.rlib.rposix_environ import OsEnvironController return OsEnvironController() + + +# ____________________________________________________________ +# Support for f... 
and ...at families of POSIX functions + +class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes=['sys/stat.h', + 'unistd.h', + 'fcntl.h'], + ) + for _name in """faccessat fchdir fchmod fchmodat fchown fchownat fexecve + fdopendir fpathconf fstat fstatat fstatvfs ftruncate + futimens futimes futimesat linkat chflags lchflags lchmod lchown + lstat lutimes mkdirat mkfifoat mknodat openat readlinkat renameat + symlinkat unlinkat utimensat""".split(): + locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name) +cConfig = rffi_platform.configure(CConfig) +globals().update(cConfig) + +if not _WIN32: + class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes=['sys/stat.h', + 'unistd.h', + 'fcntl.h'], + ) + AT_FDCWD = rffi_platform.DefinedConstantInteger('AT_FDCWD') + AT_SYMLINK_NOFOLLOW = rffi_platform.DefinedConstantInteger('AT_SYMLINK_NOFOLLOW') + AT_EACCESS = rffi_platform.DefinedConstantInteger('AT_EACCESS') + AT_REMOVEDIR = rffi_platform.DefinedConstantInteger('AT_REMOVEDIR') + AT_EMPTY_PATH = rffi_platform.DefinedConstantInteger('AT_EMPTY_PATH') + UTIME_NOW = rffi_platform.DefinedConstantInteger('UTIME_NOW') + UTIME_OMIT = rffi_platform.DefinedConstantInteger('UTIME_OMIT') + TIMESPEC = rffi_platform.Struct('struct timespec', [ + ('tv_sec', rffi.TIME_T), + ('tv_nsec', rffi.LONG)]) + + cConfig = rffi_platform.configure(CConfig) + globals().update(cConfig) + TIMESPEC2P = rffi.CArrayPtr(TIMESPEC) + +if HAVE_FACCESSAT: + c_faccessat = external('faccessat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.INT], rffi.INT) + + def faccessat(pathname, mode, dir_fd=AT_FDCWD, + effective_ids=False, follow_symlinks=True): + """Thin wrapper around faccessat(2) with an interface simlar to + Python3's os.access(). 
+ """ + flags = 0 + if not follow_symlinks: + flags |= AT_SYMLINK_NOFOLLOW + if effective_ids: + flags |= AT_EACCESS + error = c_faccessat(dir_fd, pathname, mode, flags) + return error == 0 + +if HAVE_FCHMODAT: + c_fchmodat = external('fchmodat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO,) + + def fchmodat(path, mode, dir_fd=AT_FDCWD, follow_symlinks=True): + if follow_symlinks: + flag = 0 + else: + flag = AT_SYMLINK_NOFOLLOW + error = c_fchmodat(dir_fd, path, mode, flag) + handle_posix_error('fchmodat', error) + +if HAVE_FCHOWNAT: + c_fchownat = external('fchownat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO,) + + def fchownat(path, owner, group, dir_fd=AT_FDCWD, + follow_symlinks=True, empty_path=False): + flag = 0 + if not follow_symlinks: + flag |= AT_SYMLINK_NOFOLLOW + if empty_path: + flag |= AT_EMPTY_PATH + error = c_fchownat(dir_fd, path, owner, group, flag) + handle_posix_error('fchownat', error) + +if HAVE_FEXECVE: + c_fexecve = external('fexecve', + [rffi.INT, rffi.CCHARPP, rffi.CCHARPP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def fexecve(fd, args, env): + envstrs = [] + for item in env.iteritems(): + envstr = "%s=%s" % item + envstrs.append(envstr) + + # This list conversion already takes care of NUL bytes. 
+ l_args = rffi.ll_liststr2charpp(args) + l_env = rffi.ll_liststr2charpp(envstrs) + c_fexecve(fd, l_args, l_env) + + rffi.free_charpp(l_env) + rffi.free_charpp(l_args) + raise OSError(get_saved_errno(), "execve failed") + +if HAVE_LINKAT: + c_linkat = external('linkat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT) + + def linkat(src, dst, src_dir_fd=AT_FDCWD, dst_dir_fd=AT_FDCWD, + follow_symlinks=True): + """Thin wrapper around linkat(2) with an interface similar to + Python3's os.link() + """ + if follow_symlinks: + flag = 0 + else: + flag = AT_SYMLINK_NOFOLLOW + error = c_linkat(src_dir_fd, src, dst_dir_fd, dst, flag) + handle_posix_error('linkat', error) + +if HAVE_FUTIMENS: + c_futimens = external('futimens', [rffi.INT, TIMESPEC2P], rffi.INT) + + def futimens(fd, atime, atime_ns, mtime, mtime_ns): + l_times = lltype.malloc(TIMESPEC2P.TO, 2, flavor='raw') + rffi.setintfield(l_times[0], 'c_tv_sec', atime) + rffi.setintfield(l_times[0], 'c_tv_nsec', atime_ns) + rffi.setintfield(l_times[1], 'c_tv_sec', mtime) + rffi.setintfield(l_times[1], 'c_tv_nsec', mtime_ns) + error = c_futimens(fd, l_times) + lltype.free(l_times, flavor='raw') + handle_posix_error('futimens', error) + +if HAVE_UTIMENSAT: + c_utimensat = external('utimensat', + [rffi.INT, rffi.CCHARP, TIMESPEC2P, rffi.INT], rffi.INT) + + def utimensat(pathname, atime, atime_ns, mtime, mtime_ns, + dir_fd=AT_FDCWD, follow_symlinks=True): + """Wrapper around utimensat(2) + + To set access time to the current time, pass atime_ns=UTIME_NOW, + atime is then ignored. + + To set modification time to the current time, pass mtime_ns=UTIME_NOW, + mtime is then ignored. 
+ """ + l_times = lltype.malloc(TIMESPEC2P.TO, 2, flavor='raw') + rffi.setintfield(l_times[0], 'c_tv_sec', atime) + rffi.setintfield(l_times[0], 'c_tv_nsec', atime_ns) + rffi.setintfield(l_times[1], 'c_tv_sec', mtime) + rffi.setintfield(l_times[1], 'c_tv_nsec', mtime_ns) + if follow_symlinks: + flag = 0 + else: + flag = AT_SYMLINK_NOFOLLOW + error = c_utimensat(dir_fd, pathname, l_times, flag) + lltype.free(l_times, flavor='raw') + handle_posix_error('utimensat', error) + +if HAVE_MKDIRAT: + c_mkdirat = external('mkdirat', + [rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def mkdirat(pathname, mode, dir_fd=AT_FDCWD): + error = c_mkdirat(dir_fd, pathname, mode) + handle_posix_error('mkdirat', error) + +if HAVE_UNLINKAT: + c_unlinkat = external('unlinkat', + [rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def unlinkat(pathname, dir_fd=AT_FDCWD, removedir=False): + flag = AT_REMOVEDIR if removedir else 0 + error = c_unlinkat(dir_fd, pathname, flag) + handle_posix_error('unlinkat', error) + +if HAVE_READLINKAT: + c_readlinkat = external( + 'readlinkat', + [rffi.INT, rffi.CCHARP, rffi.CCHARP, rffi.SIZE_T], rffi.SSIZE_T, + save_err=rffi.RFFI_SAVE_ERRNO) + + def readlinkat(pathname, dir_fd=AT_FDCWD): + pathname = _as_bytes0(pathname) + bufsize = 1023 + while True: + buf = lltype.malloc(rffi.CCHARP.TO, bufsize, flavor='raw') + res = widen(c_readlinkat(dir_fd, pathname, buf, bufsize)) + if res < 0: + lltype.free(buf, flavor='raw') + error = get_saved_errno() # failed + raise OSError(error, "readlinkat failed") + elif res < bufsize: + break # ok + else: + # buf too small, try again with a larger buffer + lltype.free(buf, flavor='raw') + bufsize *= 4 + # convert the result to a string + result = rffi.charp2strn(buf, res) + lltype.free(buf, flavor='raw') + return result + +if HAVE_RENAMEAT: + c_renameat = external( + 'renameat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP], rffi.INT, + 
save_err=rffi.RFFI_SAVE_ERRNO) + + def renameat(src, dst, src_dir_fd=AT_FDCWD, dst_dir_fd=AT_FDCWD): + error = c_renameat(src_dir_fd, src, dst_dir_fd, dst) + handle_posix_error('renameat', error) + + +if HAVE_SYMLINKAT: + c_symlinkat = external('symlinkat', + [rffi.CCHARP, rffi.INT, rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def symlinkat(src, dst, dir_fd=AT_FDCWD): + error = c_symlinkat(src, dir_fd, dst) + handle_posix_error('symlinkat', error) + +if HAVE_OPENAT: + c_openat = external('openat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + @enforceargs(s_Str0, int, int, int, typecheck=False) + def openat(path, flags, mode, dir_fd=AT_FDCWD): + fd = c_openat(dir_fd, path, flags, mode) + return handle_posix_error('open', fd) + +if HAVE_MKFIFOAT: + c_mkfifoat = external('mkfifoat', + [rffi.INT, rffi.CCHARP, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def mkfifoat(path, mode, dir_fd=AT_FDCWD): + error = c_mkfifoat(dir_fd, path, mode) + handle_posix_error('mkfifoat', error) + +if HAVE_MKNODAT: + c_mknodat = external('mknodat', + [rffi.INT, rffi.CCHARP, rffi.MODE_T, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def mknodat(path, mode, device, dir_fd=AT_FDCWD): + error = c_mknodat(dir_fd, path, mode, device) + handle_posix_error('mknodat', error) diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -1,3 +1,4 @@ +import platform as host_platform import py import sys from rpython.tool.udir import udir @@ -28,8 +29,7 @@ def setup(): - from rpython.jit.backend import detect_cpu - if detect_cpu.autodetect().startswith(detect_cpu.MODEL_S390_64): + if host_platform.machine() == 's390x': raise VMProfPlatformUnsupported("rvmprof not supported on" " s390x CPUs for now") compile_extra = ['-DRPYTHON_LL2CTYPES'] diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py 
--- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -7,6 +7,12 @@ import errno import py +def rposix_requires(funcname): + return py.test.mark.skipif(not hasattr(rposix, funcname), + reason="Requires rposix.%s()" % funcname) + +win_only = py.test.mark.skipif("os.name != 'nt'") + class TestPosixFunction: def test_access(self): filename = str(udir.join('test_access.txt')) @@ -29,9 +35,8 @@ for value in times: assert isinstance(value, float) + @py.test.mark.skipif("not hasattr(os, 'getlogin')") def test_getlogin(self): - if not hasattr(os, 'getlogin'): - py.test.skip('posix specific function') try: expected = os.getlogin() except OSError, e: @@ -39,9 +44,8 @@ data = rposix.getlogin() assert data == expected + @win_only def test_utimes(self): - if os.name != 'nt': - py.test.skip('Windows specific feature') # Windows support centiseconds def f(fname, t1): os.utime(fname, (t1, t1)) @@ -51,15 +55,12 @@ t1 = 1159195039.25 compile(f, (str, float))(str(fname), t1) assert t1 == os.stat(str(fname)).st_mtime - if sys.version_info < (2, 7): - py.test.skip('requires Python 2.7') t1 = 5000000000.0 compile(f, (str, float))(str(fname), t1) assert t1 == os.stat(str(fname)).st_mtime + @win_only def test__getfullpathname(self): - if os.name != 'nt': - py.test.skip('nt specific function') posix = __import__(os.name) sysdrv = os.getenv('SystemDrive', 'C:') stuff = sysdrv + 'stuff' @@ -99,11 +100,25 @@ def test_mkdir(self): filename = str(udir.join('test_mkdir.dir')) rposix.mkdir(filename, 0) - exc = py.test.raises(OSError, rposix.mkdir, filename, 0) - assert exc.value.errno == errno.EEXIST + with py.test.raises(OSError) as excinfo: + rposix.mkdir(filename, 0) + assert excinfo.value.errno == errno.EEXIST if sys.platform == 'win32': assert exc.type is WindowsError + @rposix_requires('mkdirat') + def test_mkdirat(self): + relpath = 'test_mkdirat.dir' + filename = str(udir.join(relpath)) + dirfd = os.open(os.path.dirname(filename), os.O_RDONLY) + try: + 
rposix.mkdirat(relpath, 0, dir_fd=dirfd) + with py.test.raises(OSError) as excinfo: + rposix.mkdirat(relpath, 0, dir_fd=dirfd) + assert excinfo.value.errno == errno.EEXIST + finally: + os.close(dirfd) + def test_strerror(self): assert rposix.strerror(2) == os.strerror(2) @@ -116,10 +131,8 @@ os.unlink(filename) + @py.test.mark.skipif("os.name != 'posix'") def test_execve(self): - if os.name != 'posix': - py.test.skip('posix specific function') - EXECVE_ENV = {"foo": "bar", "baz": "quux"} def run_execve(program, args=None, env=None, do_path_lookup=False): @@ -258,11 +271,8 @@ assert rposix.isatty(-1) is False + at py.test.mark.skipif("not hasattr(os, 'ttyname')") class TestOsExpect(ExpectTest): - def setup_class(cls): - if not hasattr(os, 'ttyname'): - py.test.skip("no ttyname") - def test_ttyname(self): def f(): import os @@ -426,9 +436,8 @@ except Exception: pass + @win_only def test_is_valid_fd(self): - if os.name != 'nt': - py.test.skip('relevant for windows only') assert rposix.is_valid_fd(0) == 1 fid = open(str(udir.join('validate_test.txt')), 'w') fd = fid.fileno() @@ -448,6 +457,59 @@ def _get_filename(self): return str(udir.join('test_open_ascii')) + @rposix_requires('openat') + def test_openat(self): + def f(dirfd): + try: + fd = rposix.openat('test_open_ascii', os.O_RDONLY, 0777, dirfd) + try: + text = os.read(fd, 50) + return text + finally: + os.close(fd) + except OSError: + return '' + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + assert ll_to_string(interpret(f, [dirfd])) == "test" + finally: + os.close(dirfd) + + @rposix_requires('unlinkat') + def test_unlinkat(self): + def f(dirfd): + return rposix.unlinkat('test_open_ascii', dir_fd=dirfd) + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + interpret(f, [dirfd]) + finally: + os.close(dirfd) + assert not os.path.exists(self.ufilename) + + def test_utimensat(self): + def f(dirfd): + return rposix.utimensat('test_open_ascii', + 0, rposix.UTIME_NOW, 0, 
rposix.UTIME_NOW, dir_fd=dirfd) + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + interpret(f, [dirfd]) # does not crash + finally: + os.close(dirfd) + + def test_fchmodat(self): + def f(dirfd): + return rposix.fchmodat('test_open_ascii', 0777, dirfd) + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + interpret(f, [dirfd]) # does not crash + finally: + os.close(dirfd) + + class TestPosixUnicode(BasePosixUnicodeOrAscii): def _get_filename(self): return (unicode(udir.join('test_open')) + @@ -465,3 +527,30 @@ os.open('/tmp/t', 0, 0) os.open(u'/tmp/t', 0, 0) compile(f, ()) + + +def test_fdlistdir(tmpdir): + tmpdir.join('file').write('text') + dirfd = os.open(str(tmpdir), os.O_RDONLY) + result = rposix.fdlistdir(dirfd) + # Note: fdlistdir() always closes dirfd + assert result == ['file'] + +def test_symlinkat(tmpdir): + tmpdir.join('file').write('text') + dirfd = os.open(str(tmpdir), os.O_RDONLY) + try: + rposix.symlinkat('file', 'link', dir_fd=dirfd) + assert os.readlink(str(tmpdir.join('link'))) == 'file' + finally: + os.close(dirfd) + +def test_renameat(tmpdir): + tmpdir.join('file').write('text') + dirfd = os.open(str(tmpdir), os.O_RDONLY) + try: + rposix.renameat('file', 'file2', src_dir_fd=dirfd, dst_dir_fd=dirfd) + finally: + os.close(dirfd) + assert tmpdir.join('file').check(exists=False) + assert tmpdir.join('file2').check(exists=True) From pypy.commits at gmail.com Wed Apr 6 11:49:55 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 06 Apr 2016 08:49:55 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Merge branch 'follow_symlinks' Message-ID: <57053023.90051c0a.85c37.5785@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83543:3fcb678f1119 Date: 2016-04-06 16:42 +0100 http://bitbucket.org/pypy/pypy/changeset/3fcb678f1119/ Log: Merge branch 'follow_symlinks' Update implementation of many posix functions to support functionalities added in 3.3. 
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -66,6 +66,13 @@ """NOT_RPYTHON""" raise NotImplementedError +def kwonly(arg_unwrapper): + """Mark argument as keyword-only. + + XXX: has no actual effect for now. + """ + return arg_unwrapper + class UnwrapSpecRecipe(object): "NOT_RPYTHON" diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, @@ -134,8 +134,8 @@ if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: pass # typecheck returned "ok" without forcing 'ref' at all elif not PyBytes_Check(space, ref): # otherwise, use the alternate way - raise OperationError(space.w_TypeError, space.wrap( - "PyBytes_AsString only support strings")) + raise oefmt(space.w_TypeError, + "expected bytes, %T found", from_ref(space, ref)) ref_str = rffi.cast(PyBytesObject, ref) if not ref_str.c_buffer: # copy string buffer @@ -147,8 +147,8 @@ @cpython_api([PyObject, rffi.CCHARPP, rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1) def PyBytes_AsStringAndSize(space, ref, buffer, length): if not PyBytes_Check(space, ref): - raise OperationError(space.w_TypeError, space.wrap( - "PyBytes_AsStringAndSize only support strings")) + raise oefmt(space.w_TypeError, + "expected bytes, %T found", from_ref(space, ref)) ref_str = rffi.cast(PyBytesObject, ref) if not ref_str.c_buffer: # copy string buffer diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1,5 +1,12 @@ import os import 
sys +from math import modf +from errno import EOPNOTSUPP +try: + from errno import ENOTSUP +except ImportError: + # some Pythons don't have errno.ENOTSUP + ENOTSUP = 0 from rpython.rlib import rposix, rposix_stat from rpython.rlib import objectmodel, rurandom @@ -7,9 +14,10 @@ from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib.unroll import unrolling_iterable -from pypy.interpreter.gateway import unwrap_spec, WrappedDefault -from pypy.interpreter.error import (OperationError, wrap_oserror, - wrap_oserror2, strerror as _strerror) +from pypy.interpreter.gateway import ( + unwrap_spec, WrappedDefault, Unwrapper, kwonly) +from pypy.interpreter.error import ( + OperationError, wrap_oserror, oefmt, wrap_oserror2, strerror as _strerror) from pypy.interpreter.executioncontext import ExecutionContext @@ -31,7 +39,7 @@ pass else: def check_uid_range(space, num): - if num < -(1<<31) or num >= (1<<32): + if num < -(1 << 31) or num >= (1 << 32): raise OperationError(space.w_OverflowError, space.wrap("integer out of range")) @@ -104,15 +112,63 @@ return func(fname1, fname2, *args) return dispatch - at unwrap_spec(flag=c_int, mode=c_int) -def open(space, w_fname, flag, mode=0777): - """Open a file (for low level IO). 
-Return a file descriptor (a small integer).""" + +if hasattr(rposix, 'AT_FDCWD'): + DEFAULT_DIR_FD = rposix.AT_FDCWD +else: + DEFAULT_DIR_FD = -100 +DIR_FD_AVAILABLE = False + +def unwrap_fd(space, w_value): + return space.c_int_w(w_value) + +def _unwrap_dirfd(space, w_value): + if space.is_none(w_value): + return DEFAULT_DIR_FD + else: + return unwrap_fd(space, w_value) + +class _DirFD(Unwrapper): + def unwrap(self, space, w_value): + return _unwrap_dirfd(space, w_value) + +class _DirFD_Unavailable(Unwrapper): + def unwrap(self, space, w_value): + dir_fd = unwrap_fd(space, w_value) + if dir_fd == DEFAULT_DIR_FD: + return dir_fd + else: + raise oefmt( + space.w_NotImplementedError, + "dir_fd unavailable on this platform") + +def DirFD(available=False): + return _DirFD if available else _DirFD_Unavailable + + at specialize.arg(1, 2) +def argument_unavailable(space, funcname, arg): + return oefmt( + space.w_NotImplementedError, + "%s: %s unavailable on this platform", funcname, arg) + + at unwrap_spec(flags=c_int, mode=c_int, dir_fd=DirFD(rposix.HAVE_OPENAT)) +def open(space, w_path, flags, mode=0777, dir_fd=DEFAULT_DIR_FD): + """open(path, flags, mode=0o777, *, dir_fd=None) + +Open a file for low level IO. Returns a file handle (integer). + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. 
+ If it is unavailable, using it will raise a NotImplementedError.""" try: - fd = dispatch_filename(rposix.open)( - space, w_fname, flag, mode) - except OSError, e: - raise wrap_oserror2(space, e, w_fname) + if dir_fd == DEFAULT_DIR_FD: + fd = dispatch_filename(rposix.open)(space, w_path, flags, mode) + else: + path = space.fsencode_w(w_path) + fd = rposix.openat(path, flags, mode, dir_fd) + except OSError as e: + raise wrap_oserror2(space, e, w_path) return space.wrap(fd) @unwrap_spec(fd=c_int, pos=r_longlong, how=c_int) @@ -282,7 +338,8 @@ for i, (name, _) in STATVFS_FIELDS: vals_w[i] = space.wrap(getattr(st, name)) w_tuple = space.newtuple(vals_w) - w_statvfs_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('statvfs_result')) + w_statvfs_result = space.getattr( + space.getbuiltinmodule(os.name), space.wrap('statvfs_result')) return space.call_function(w_statvfs_result, w_tuple) @@ -297,21 +354,23 @@ else: return build_stat_result(space, st) -def stat(space, w_path): - """Perform a stat system call on the given path. Return an object -with (at least) the following attributes: - st_mode - st_ino - st_dev - st_nlink - st_uid - st_gid - st_size - st_atime - st_mtime - st_ctime -""" + at unwrap_spec(dir_fd=DirFD(available=False), follow_symlinks=kwonly(bool)) +def stat(space, w_path, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): + """stat(path, *, dir_fd=None, follow_symlinks=True) -> stat result +Perform a stat system call on the given path. + +path may be specified as either a string or as an open file descriptor. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. + dir_fd may not be supported on your platform; if it is unavailable, using + it will raise a NotImplementedError. +If follow_symlinks is False, and the last element of the path is a symbolic + link, stat will examine the symbolic link itself instead of the file the + link points to. 
+It is an error to use dir_fd or follow_symlinks when specifying path as + an open file descriptor.""" try: st = dispatch_filename(rposix_stat.stat, 0, allow_fd_fn=rposix_stat.fstat)(space, w_path) @@ -320,8 +379,13 @@ else: return build_stat_result(space, st) -def lstat(space, w_path): - "Like stat(path), but do not follow symbolic links." + at unwrap_spec(dir_fd=DirFD(available=False)) +def lstat(space, w_path, dir_fd=DEFAULT_DIR_FD): + """lstat(path, *, dir_fd=None) -> stat result + +Like stat(), but do not follow symbolic links. +Equivalent to stat(path, follow_symlinks=False).""" + try: st = dispatch_filename(rposix_stat.lstat)(space, w_path) except OSError, e: @@ -360,6 +424,13 @@ def statvfs(space, w_path): + """statvfs(path) + +Perform a statvfs system call on the given path. + +path may always be specified as a string. +On some platforms, path may also be specified as an open file descriptor. + If this functionality is unavailable, using it raises an exception.""" try: st = dispatch_filename(rposix_stat.statvfs)(space, w_path) except OSError as e: @@ -387,20 +458,46 @@ except OSError, e: raise wrap_oserror(space, e) - at unwrap_spec(mode=c_int) -def access(space, w_path, mode): - """ - access(path, mode) -> 1 if granted, 0 otherwise + at unwrap_spec(mode=c_int, + dir_fd=DirFD(rposix.HAVE_FACCESSAT), effective_ids=kwonly(bool), follow_symlinks=kwonly(bool)) +def access(space, w_path, mode, + dir_fd=DEFAULT_DIR_FD, effective_ids=True, follow_symlinks=True): + """\ +access(path, mode, *, dir_fd=None, effective_ids=False, follow_symlinks=True) - Use the real uid/gid to test for access to a path. Note that most - operations will use the effective uid/gid, therefore this routine can - be used in a suid/sgid environment to test if the invoking user has the - specified access to the path. The mode argument can be F_OK to test - existence, or the inclusive-OR of R_OK, W_OK, and X_OK. - """ +Use the real uid/gid to test for access to a path. 
Returns True if granted, +False otherwise. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +If effective_ids is True, access will use the effective uid/gid instead of + the real uid/gid. +If follow_symlinks is False, and the last element of the path is a symbolic + link, access will examine the symbolic link itself instead of the file the + link points to. +dir_fd, effective_ids, and follow_symlinks may not be implemented + on your platform. If they are unavailable, using them will raise a + NotImplementedError. + +Note that most operations will use the effective uid/gid, therefore this + routine can be used in a suid/sgid environment to test if the invoking user + has the specified access to the path. +The mode argument can be F_OK to test existence, or the inclusive-OR + of R_OK, W_OK, and X_OK.""" + if not rposix.HAVE_FACCESSAT: + if not follow_symlinks: + raise argument_unavailable(space, "access", "follow_symlinks") + if effective_ids: + raise argument_unavailable(space, "access", "effective_ids") + try: - ok = dispatch_filename(rposix.access)(space, w_path, mode) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD and follow_symlinks and not effective_ids: + ok = dispatch_filename(rposix.access)(space, w_path, mode) + else: + path = space.fsencode_w(w_path) + ok = rposix.faccessat(path, mode, + dir_fd, effective_ids, follow_symlinks) + except OSError as e: raise wrap_oserror2(space, e, w_path) else: return space.wrap(ok) @@ -433,18 +530,42 @@ else: return space.wrap(rc) -def unlink(space, w_path): - """Remove a file (same as remove(path)).""" + at unwrap_spec(dir_fd=DirFD(rposix.HAVE_UNLINKAT)) +def unlink(space, w_path, dir_fd=DEFAULT_DIR_FD): + """unlink(path, *, dir_fd=None) + +Remove a file (same as remove()). 
+ +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" try: - dispatch_filename(rposix.unlink)(space, w_path) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename(rposix.unlink)(space, w_path) + else: + path = space.fsencode_w(w_path) + rposix.unlinkat(path, dir_fd, removedir=False) + except OSError as e: raise wrap_oserror2(space, e, w_path) -def remove(space, w_path): - """Remove a file (same as unlink(path)).""" + at unwrap_spec(dir_fd=DirFD(rposix.HAVE_UNLINKAT)) +def remove(space, w_path, dir_fd=DEFAULT_DIR_FD): + """remove(path, *, dir_fd=None) + +Remove a file (same as unlink()). + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" try: - dispatch_filename(rposix.unlink)(space, w_path) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename(rposix.unlink)(space, w_path) + else: + path = space.fsencode_w(w_path) + rposix.unlinkat(path, dir_fd, removedir=False) + except OSError as e: raise wrap_oserror2(space, e, w_path) def _getfullpathname(space, w_path): @@ -493,19 +614,44 @@ except OSError, e: raise wrap_oserror2(space, e, w_path) - at unwrap_spec(mode=c_int) -def mkdir(space, w_path, mode=0777): - """Create a directory.""" + at unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_MKDIRAT)) +def mkdir(space, w_path, mode=0o777, dir_fd=DEFAULT_DIR_FD): + """mkdir(path, mode=0o777, *, dir_fd=None) + +Create a directory. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. 
+dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError. + +The mode argument is ignored on Windows.""" try: - dispatch_filename(rposix.mkdir)(space, w_path, mode) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename(rposix.mkdir)(space, w_path, mode) + else: + path = space.fsencode_w(w_path) + rposix.mkdirat(path, mode, dir_fd) + except OSError as e: raise wrap_oserror2(space, e, w_path) -def rmdir(space, w_path): - """Remove a directory.""" + at unwrap_spec(dir_fd=DirFD(rposix.HAVE_UNLINKAT)) +def rmdir(space, w_path, dir_fd=DEFAULT_DIR_FD): + """rmdir(path, *, dir_fd=None) + +Remove a directory. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" try: - dispatch_filename(rposix.rmdir)(space, w_path) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename(rposix.rmdir)(space, w_path) + else: + path = space.fsencode_w(w_path) + rposix.unlinkat(path, dir_fd, removedir=True) + except OSError as e: raise wrap_oserror2(space, e, w_path) @unwrap_spec(errno=c_int) @@ -538,9 +684,11 @@ self.space = space self.w_environ = space.newdict() self.random_context = rurandom.init_urandom() + def startup(self, space): space.call_method(self.w_environ, 'clear') _convertenviron(space, self.w_environ) + def _freeze_(self): # don't capture the environment in the translated pypy self.space.call_method(self.w_environ, 'clear') @@ -596,35 +744,56 @@ raise wrap_oserror(space, e) - at unwrap_spec(w_dirname=WrappedDefault(u".")) -def listdir(space, w_dirname): - """Return a list containing the names of the entries in the directory. 
+ at unwrap_spec(w_path=WrappedDefault(u".")) +def listdir(space, w_path): + """listdir(path='.') -> list_of_filenames -\tpath: path of directory to list +Return a list containing the names of the files in the directory. +The list is in arbitrary order. It does not include the special +entries '.' and '..' even if they are present in the directory. -The list is in arbitrary order. It does not include the special -entries '.' and '..' even if they are present in the directory.""" +path can be specified as either str or bytes. If path is bytes, + the filenames returned will also be bytes; in all other circumstances + the filenames returned will be str. +On some platforms, path may also be specified as an open file descriptor; + the file descriptor must refer to a directory. + If this functionality is unavailable, using it raises NotImplementedError.""" + if space.isinstance_w(w_path, space.w_bytes): + dirname = space.str0_w(w_path) + try: + result = rposix.listdir(dirname) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + return space.newlist_bytes(result) try: - if space.isinstance_w(w_dirname, space.w_unicode): - dirname = FileEncoder(space, w_dirname) + path = space.fsencode_w(w_path) + except OperationError as operr: + if not rposix.HAVE_FDOPENDIR: + raise oefmt(space.w_TypeError, + "listdir: illegal type for path argument") + if not space.isinstance_w(w_path, space.w_int): + raise oefmt(space.w_TypeError, + "argument should be string, bytes or integer, not %T", w_path) + fd = unwrap_fd(space, w_path) + try: + result = rposix.fdlistdir(fd) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + else: + dirname = FileEncoder(space, w_path) + try: result = rposix.listdir(dirname) - len_result = len(result) - result_w = [None] * len_result - for i in range(len_result): - if _WIN32: - result_w[i] = space.wrap(result[i]) - else: - w_bytes = space.wrapbytes(result[i]) - result_w[i] = space.fsdecode(w_bytes) - return space.newlist(result_w) + 
except OSError as e: + raise wrap_oserror2(space, e, w_path) + len_result = len(result) + result_w = [None] * len_result + for i in range(len_result): + if _WIN32: + result_w[i] = space.wrap(result[i]) else: - dirname = space.str0_w(w_dirname) - result = rposix.listdir(dirname) - # The list comprehension is a workaround for an obscure translation - # bug. - return space.newlist_bytes([x for x in result]) - except OSError, e: - raise wrap_oserror2(space, e, w_dirname) + w_bytes = space.wrapbytes(result[i]) + result_w[i] = space.fsdecode(w_bytes) + return space.newlist(result_w) def pipe(space): "Create a pipe. Returns (read_end, write_end)." @@ -634,57 +803,160 @@ raise wrap_oserror(space, e) return space.newtuple([space.wrap(fd1), space.wrap(fd2)]) - at unwrap_spec(mode=c_int) -def chmod(space, w_path, mode): - "Change the access permissions of a file." + at unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_FCHMODAT), follow_symlinks=kwonly(bool)) +def chmod(space, w_path, mode, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): + """chmod(path, mode, *, dir_fd=None, follow_symlinks=True) + +Change the access permissions of a file. + +path may always be specified as a string. +On some platforms, path may also be specified as an open file descriptor. + If this functionality is unavailable, using it raises an exception. +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +If follow_symlinks is False, and the last element of the path is a symbolic + link, chmod will modify the symbolic link itself instead of the file the + link points to. +It is an error to use dir_fd or follow_symlinks when specifying path as + an open file descriptor. +dir_fd and follow_symlinks may not be implemented on your platform. 
+ If they are unavailable, using them will raise a NotImplementedError.""" + if not rposix.HAVE_FCHMODAT: + if not follow_symlinks: + raise argument_unavailable(space, "chmod", "follow_symlinks") + else: + try: + dispatch_filename(rposix.chmod)(space, w_path, mode) + return + except OSError as e: + raise wrap_oserror2(space, e, w_path) + try: - dispatch_filename(rposix.chmod)(space, w_path, mode) - except OSError, e: - raise wrap_oserror2(space, e, w_path) + path = space.fsencode_w(w_path) + except OperationError as operr: + if not space.isinstance_w(w_path, space.w_int): + raise oefmt(space.w_TypeError, + "argument should be string, bytes or integer, not %T", w_path) + fd = unwrap_fd(space, w_path) + _chmod_fd(space, fd, mode) + else: + try: + _chmod_path(path, mode, dir_fd, follow_symlinks) + except OSError as e: + if not follow_symlinks and e.errno in (ENOTSUP, EOPNOTSUPP): + # fchmodat() doesn't actually implement follow_symlinks=False + # so raise NotImplementedError in this case + raise argument_unavailable(space, "chmod", "follow_symlinks") + else: + raise wrap_oserror2(space, e, w_path) - at unwrap_spec(mode=c_int) -def fchmod(space, w_fd, mode): - """Change the access permissions of the file given by file -descriptor fd.""" - fd = space.c_filedescriptor_w(w_fd) +def _chmod_path(path, mode, dir_fd, follow_symlinks): + if dir_fd != DEFAULT_DIR_FD or not follow_symlinks: + rposix.fchmodat(path, mode, dir_fd, follow_symlinks) + else: + rposix.chmod(path, mode) + +def _chmod_fd(space, fd, mode): try: os.fchmod(fd, mode) + except OSError as e: + raise wrap_oserror(space, e) + + + at unwrap_spec(fd=c_int, mode=c_int) +def fchmod(space, fd, mode): + """\ + Change the access permissions of the file given by file descriptor fd. 
+ """ + _chmod_fd(space, fd, mode) + + at unwrap_spec(src_dir_fd=DirFD(rposix.HAVE_RENAMEAT), + dst_dir_fd=DirFD(rposix.HAVE_RENAMEAT)) +def rename(space, w_src, w_dst, + src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD): + """rename(src, dst, *, src_dir_fd=None, dst_dir_fd=None) + +Rename a file or directory. + +If either src_dir_fd or dst_dir_fd is not None, it should be a file + descriptor open to a directory, and the respective path string (src or dst) + should be relative; the path will then be relative to that directory. +src_dir_fd and dst_dir_fd, may not be implemented on your platform. + If they are unavailable, using them will raise a NotImplementedError.""" + try: + if (src_dir_fd != DEFAULT_DIR_FD or dst_dir_fd != DEFAULT_DIR_FD): + src = space.fsencode_w(w_src) + dst = space.fsencode_w(w_dst) + rposix.renameat(src, dst, src_dir_fd, dst_dir_fd) + else: + dispatch_filename_2(rposix.rename)(space, w_src, w_dst) except OSError, e: raise wrap_oserror(space, e) -def rename(space, w_old, w_new): - "Rename a file or directory." + at unwrap_spec(src_dir_fd=DirFD(rposix.HAVE_RENAMEAT), + dst_dir_fd=DirFD(rposix.HAVE_RENAMEAT)) +def replace(space, w_src, w_dst, + src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD): + """replace(src, dst, *, src_dir_fd=None, dst_dir_fd=None) + +Rename a file or directory, overwriting the destination. + +If either src_dir_fd or dst_dir_fd is not None, it should be a file + descriptor open to a directory, and the respective path string (src or dst) + should be relative; the path will then be relative to that directory. +src_dir_fd and dst_dir_fd, may not be implemented on your platform. 
+ If they are unavailable, using them will raise a NotImplementedError.""" try: - dispatch_filename_2(rposix.rename)(space, w_old, w_new) - except OSError, e: + if (src_dir_fd != DEFAULT_DIR_FD or dst_dir_fd != DEFAULT_DIR_FD): + src = space.fsencode_w(w_src) + dst = space.fsencode_w(w_dst) + rposix.renameat(src, dst, src_dir_fd, dst_dir_fd) + else: + dispatch_filename_2(rposix.replace)(space, w_src, w_dst) + except OSError as e: raise wrap_oserror(space, e) -def replace(space, w_old, w_new): - "Replace a file or directory, overwriting the destination." + at unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_MKFIFOAT)) +def mkfifo(space, w_path, mode=0666, dir_fd=DEFAULT_DIR_FD): + """mkfifo(path, mode=0o666, *, dir_fd=None) + +Create a FIFO (a POSIX named pipe). + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" try: - dispatch_filename_2(rposix.replace)(space, w_old, w_new) - except OSError, e: - raise wrap_oserror(space, e) + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename(rposix.mkfifo)(space, w_path, mode) + else: + path = space.fsencode_w(w_path) + rposix.mkfifoat(path, mode, dir_fd) + except OSError as e: + raise wrap_oserror2(space, e, w_path) - at unwrap_spec(mode=c_int) -def mkfifo(space, w_filename, mode=0666): - """Create a FIFO (a POSIX named pipe).""" - try: - dispatch_filename(rposix.mkfifo)(space, w_filename, mode) - except OSError, e: - raise wrap_oserror2(space, e, w_filename) + at unwrap_spec(mode=c_int, device=c_int, dir_fd=DirFD(rposix.HAVE_MKNODAT)) +def mknod(space, w_filename, mode=0600, device=0, dir_fd=DEFAULT_DIR_FD): + """mknod(filename, mode=0o600, device=0, *, dir_fd=None) - at unwrap_spec(mode=c_int, device=c_int) -def mknod(space, w_filename, mode=0600, device=0): - """Create a filesystem node (file, device special 
file or named pipe) +Create a filesystem node (file, device special file or named pipe) named filename. mode specifies both the permissions to use and the type of node to be created, being combined (bitwise OR) with one of S_IFREG, S_IFCHR, S_IFBLK, and S_IFIFO. For S_IFCHR and S_IFBLK, device defines the newly created device special file (probably using -os.makedev()), otherwise it is ignored.""" +os.makedev()), otherwise it is ignored. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" try: - dispatch_filename(rposix.mknod)(space, w_filename, mode, device) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename(rposix.mknod)(space, w_filename, mode, device) + else: + fname = space.fsencode_w(w_filename) + rposix.mknodat(fname, mode, device, dir_fd) + except OSError as e: raise wrap_oserror2(space, e, w_filename) @unwrap_spec(mask=c_int) @@ -723,31 +995,85 @@ import signal rposix.kill(os.getpid(), signal.SIGABRT) - at unwrap_spec(src='fsencode', dst='fsencode') -def link(space, src, dst): - "Create a hard link to a file." + at unwrap_spec( + src='fsencode', dst='fsencode', + src_dir_fd=DirFD(rposix.HAVE_LINKAT), dst_dir_fd=DirFD(rposix.HAVE_LINKAT), + follow_symlinks=kwonly(bool)) +def link( + space, src, dst, + src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD, + follow_symlinks=True): + """\ +link(src, dst, *, src_dir_fd=None, dst_dir_fd=None, follow_symlinks=True) + +Create a hard link to a file. + +If either src_dir_fd or dst_dir_fd is not None, it should be a file + descriptor open to a directory, and the respective path string (src or dst) + should be relative; the path will then be relative to that directory. 
+If follow_symlinks is False, and the last element of src is a symbolic + link, link will create a link to the symbolic link itself instead of the + file the link points to. +src_dir_fd, dst_dir_fd, and follow_symlinks may not be implemented on your + platform. If they are unavailable, using them will raise a + NotImplementedError.""" try: - os.link(src, dst) - except OSError, e: + if (src_dir_fd != DEFAULT_DIR_FD or dst_dir_fd != DEFAULT_DIR_FD + or not follow_symlinks): + rposix.linkat(src, dst, src_dir_fd, dst_dir_fd, follow_symlinks) + else: + rposix.link(src, dst) + except OSError as e: raise wrap_oserror(space, e) -def symlink(space, w_src, w_dst, w_target_is_directory=None): - "Create a symbolic link pointing to src named dst." - # TODO: target_is_directory has a meaning on Windows + + at unwrap_spec(dir_fd=DirFD(rposix.HAVE_SYMLINKAT)) +def symlink(space, w_src, w_dst, w_target_is_directory=None, + dir_fd=DEFAULT_DIR_FD): + """symlink(src, dst, target_is_directory=False, *, dir_fd=None) + +Create a symbolic link pointing to src named dst. + +target_is_directory is required on Windows if the target is to be + interpreted as a directory. (On Windows, symlink requires + Windows 6.0 or greater, and raises a NotImplementedError otherwise.) + target_is_directory is ignored on non-Windows platforms. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. 
+ If it is unavailable, using it will raise a NotImplementedError.""" try: - dispatch_filename_2(rposix.symlink)(space, w_src, w_dst) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename_2(rposix.symlink)(space, w_src, w_dst) + else: + src = space.fsencode_w(w_src) + dst = space.fsencode_w(w_dst) + rposix.symlinkat(src, dst, dir_fd) + except OSError as e: raise wrap_oserror(space, e) -def readlink(space, w_path): - "Return a string representing the path to which the symbolic link points." + + at unwrap_spec(dir_fd=DirFD(rposix.HAVE_READLINKAT)) +def readlink(space, w_path, dir_fd=DEFAULT_DIR_FD): + """readlink(path, *, dir_fd=None) -> path + +Return a string representing the path to which the symbolic link points. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" is_unicode = space.isinstance_w(w_path, space.w_unicode) if is_unicode: path = space.fsencode_w(w_path) else: path = space.bytes0_w(w_path) try: - result = os.readlink(path) + if dir_fd == DEFAULT_DIR_FD: + result = rposix.readlink(path) + else: + result = rposix.readlinkat(path, dir_fd) except OSError, e: raise wrap_oserror2(space, e, w_path) w_result = space.wrapbytes(result) @@ -837,7 +1163,7 @@ def _exit(space, status): os._exit(status) -def execv(space, w_command, w_args): +def execv(space, w_path, w_args): """ execv(path, args) Execute an executable path with arguments, replacing current process. 
@@ -845,7 +1171,23 @@ path: path of executable file args: iterable of strings """ - execve(space, w_command, w_args, None) + command = space.fsencode_w(w_path) + try: + args_w = space.unpackiterable(w_args) + if len(args_w) < 1: + raise oefmt(space.w_ValueError, + "execv() arg 2 must not be empty") + args = [space.fsencode_w(w_arg) for w_arg in args_w] + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise oefmt(space.w_TypeError, + "execv() arg 2 must be an iterable of strings") + try: + os.execv(command, args) + except OSError as e: + raise wrap_oserror(space, e) + def _env2interp(space, w_env): env = {} @@ -855,38 +1197,45 @@ env[space.fsencode_w(w_key)] = space.fsencode_w(w_value) return env -def execve(space, w_command, w_args, w_env): - """ execve(path, args, env) + +def execve(space, w_path, w_argv, w_environment): + """execve(path, args, env) Execute a path with arguments and environment, replacing current process. - path: path of executable file - args: iterable of arguments - env: dictionary of strings mapping to strings + path: path of executable file + args: tuple or list of arguments + env: dictionary of strings mapping to strings + +On some platforms, you may specify an open file descriptor for path; + execve will execute the program the file descriptor is open to. + If this functionality is unavailable, using it raises NotImplementedError. 
""" - command = space.fsencode_w(w_command) + if not (space.isinstance_w(w_argv, space.w_list) + or space.isinstance_w(w_argv, space.w_tuple)): + raise oefmt(space.w_TypeError, + "execve: argv must be a tuple or a list") + args = [space.fsencode_w(w_arg) for w_arg in space.unpackiterable(w_argv)] + env = _env2interp(space, w_environment) try: - args_w = space.unpackiterable(w_args) - if len(args_w) < 1: - w_msg = space.wrap("execv() must have at least one argument") - raise OperationError(space.w_ValueError, w_msg) - args = [space.fsencode_w(w_arg) for w_arg in args_w] - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - msg = "execv() arg 2 must be an iterable of strings" - raise OperationError(space.w_TypeError, space.wrap(str(msg))) - # - if w_env is None: # when called via execv() above + path = space.fsencode_w(w_path) + except OperationError: + if not rposix.HAVE_FEXECVE: + raise oefmt(space.w_TypeError, + "execve: illegal type for path argument") + if not space.isinstance_w(w_path, space.w_int): + raise oefmt(space.w_TypeError, + "argument should be string, bytes or integer, not %T", w_path) + # File descriptor case + fd = unwrap_fd(space, w_path) try: - os.execv(command, args) - except OSError, e: + rposix.fexecve(fd, args, env) + except OSError as e: raise wrap_oserror(space, e) else: - env = _env2interp(space, w_env) try: - os.execve(command, args, env) - except OSError, e: + os.execve(path, args, env) + except OSError as e: raise wrap_oserror(space, e) @unwrap_spec(mode=int, path='fsencode') @@ -908,14 +1257,55 @@ raise wrap_oserror(space, e) return space.wrap(ret) -def utime(space, w_path, w_tuple): - """ utime(path, (atime, mtime)) -utime(path, None) -Set the access and modified time of the file to the given values. If the -second form is used, set the access and modified times to the current time. 
- """ - if space.is_w(w_tuple, space.w_None): + at unwrap_spec(w_times=WrappedDefault(None), w_ns=kwonly(WrappedDefault(None)), + dir_fd=DirFD(rposix.HAVE_UTIMENSAT), follow_symlinks=kwonly(bool)) +def utime(space, w_path, w_times, w_ns, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): + """utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True) + +Set the access and modified time of path. + +path may always be specified as a string. +On some platforms, path may also be specified as an open file descriptor. + If this functionality is unavailable, using it raises an exception. + +If times is not None, it must be a tuple (atime, mtime); + atime and mtime should be expressed as float seconds since the epoch. +If ns is not None, it must be a tuple (atime_ns, mtime_ns); + atime_ns and mtime_ns should be expressed as integer nanoseconds + since the epoch. +If both times and ns are None, utime uses the current time. +Specifying tuples for both times and ns is an error. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +If follow_symlinks is False, and the last element of the path is a symbolic + link, utime will modify the symbolic link itself instead of the file the + link points to. +It is an error to use dir_fd or follow_symlinks when specifying path + as an open file descriptor. +dir_fd and follow_symlinks may not be available on your platform. 
+ If they are unavailable, using them will raise a NotImplementedError.""" + if (not space.is_w(w_times, space.w_None) and + not space.is_w(w_ns, space.w_None)): + raise oefmt(space.w_ValueError, + "utime: you may specify either 'times' or 'ns' but not both") + + if rposix.HAVE_UTIMENSAT: + path = space.fsencode_w(w_path) + try: + _utimensat(space, path, w_times, w_ns, dir_fd, follow_symlinks) + return + except OSError, e: + raise wrap_oserror2(space, e, w_path) + + if not follow_symlinks: + raise argument_unavailable(space, "utime", "follow_symlinks") + + if not space.is_w(w_ns, space.w_None): + raise oefmt(space.w_NotImplementedError, + "utime: 'ns' unsupported on this platform on PyPy") + if space.is_w(w_times, space.w_None): try: dispatch_filename(rposix.utime, 1)(space, w_path, None) return @@ -923,7 +1313,7 @@ raise wrap_oserror2(space, e, w_path) try: msg = "utime() arg 2 must be a tuple (atime, mtime) or None" - args_w = space.fixedview(w_tuple) + args_w = space.fixedview(w_times) if len(args_w) != 2: raise OperationError(space.w_TypeError, space.wrap(msg)) actime = space.float_w(args_w[0], allow_conversion=False) @@ -936,6 +1326,51 @@ raise raise OperationError(space.w_TypeError, space.wrap(msg)) + +def _utimensat(space, path, w_times, w_ns, dir_fd, follow_symlinks): + if space.is_w(w_times, space.w_None) and space.is_w(w_ns, space.w_None): + atime_s = mtime_s = 0 + atime_ns = mtime_ns = rposix.UTIME_NOW + elif not space.is_w(w_times, space.w_None): + times_w = space.fixedview(w_times) + if len(times_w) != 2: + raise oefmt(space.w_TypeError, + "utime: 'times' must be either a tuple of two ints or None") + atime_s, atime_ns = convert_seconds(space, times_w[0]) + mtime_s, mtime_ns = convert_seconds(space, times_w[1]) + else: + args_w = space.fixedview(w_ns) + if len(args_w) != 2: + raise oefmt(space.w_TypeError, + "utime: 'ns' must be a tuple of two ints") + atime_s, atime_ns = convert_ns(space, args_w[0]) + mtime_s, mtime_ns = convert_ns(space, args_w[1]) + 
+ rposix.utimensat( + path, atime_s, atime_ns, mtime_s, mtime_ns, + dir_fd=dir_fd, follow_symlinks=follow_symlinks) + +def convert_seconds(space, w_time): + if space.isinstance_w(w_time, space.w_float): + time = space.float_w(w_time) + fracpart, intpart = modf(time) + if fracpart < 0: + fracpart += 1. + intpart -= 1. + return int(intpart), int(fracpart*1e9) + else: + time = space.int_w(w_time) + return time, 0 + +def convert_ns(space, w_ns_time): + w_billion = space.wrap(1000000000) + w_res = space.divmod(w_ns_time, w_billion) + res_w = space.fixedview(w_res) + time_int = space.int_w(res_w[0]) + time_frac = space.int_w(res_w[1]) + return time_int, time_frac + + def uname(space): """ uname() -> (sysname, nodename, release, version, machine) @@ -1073,7 +1508,7 @@ @unwrap_spec(username=str, gid=c_gid_t) def initgroups(space, username, gid): """ initgroups(username, gid) -> None - + Call the system initgroups() to initialize the group access list with all of the groups of which the specified username is a member, plus the specified group id. @@ -1246,7 +1681,7 @@ @unwrap_spec(rgid=c_gid_t, egid=c_gid_t, sgid=c_gid_t) def setresgid(space, rgid, egid, sgid): """ setresgid(rgid, egid, sgid) - + Set the current process's real, effective, and saved group ids. """ try: @@ -1327,20 +1762,73 @@ raise wrap_oserror(space, e) return space.wrap(res) - at unwrap_spec(path='fsencode', uid=c_uid_t, gid=c_gid_t) -def chown(space, path, uid, gid): - """Change the owner and group id of path to the numeric uid and gid.""" + at unwrap_spec( + uid=c_uid_t, gid=c_gid_t, + dir_fd=DirFD(rposix.HAVE_FCHOWNAT), follow_symlinks=kwonly(bool)) +def chown(space, w_path, uid, gid, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): + """chown(path, uid, gid, *, dir_fd=None, follow_symlinks=True) + +Change the owner and group id of path to the numeric uid and gid. + +path may always be specified as a string. +On some platforms, path may also be specified as an open file descriptor. 
+ If this functionality is unavailable, using it raises an exception. +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +If follow_symlinks is False, and the last element of the path is a symbolic + link, chown will modify the symbolic link itself instead of the file the + link points to. +It is an error to use dir_fd or follow_symlinks when specifying path as + an open file descriptor. +dir_fd and follow_symlinks may not be implemented on your platform. + If they are unavailable, using them will raise a NotImplementedError.""" check_uid_range(space, uid) check_uid_range(space, gid) + if not (rposix.HAVE_LCHOWN or rposix.HAVE_FCHMODAT): + if not follow_symlinks: + raise argument_unavailable(space, 'chown', 'follow_symlinks') try: - os.chown(path, uid, gid) - except OSError, e: - raise wrap_oserror(space, e, path) + path = space.fsencode_w(w_path) + except OperationError: + if not space.isinstance_w(w_path, space.w_int): + raise oefmt(space.w_TypeError, + "argument should be string, bytes or integer, not %T", w_path) + # File descriptor case + fd = unwrap_fd(space, w_path) + if dir_fd != DEFAULT_DIR_FD: + raise oefmt(space.w_ValueError, + "chown: can't specify both dir_fd and fd") + if not follow_symlinks: + raise oefmt(space.w_ValueError, + "chown: cannnot use fd and follow_symlinks together") + try: + os.fchown(fd, uid, gid) + except OSError as e: + raise wrap_oserror(space, e) + else: + # String case + try: + if (rposix.HAVE_LCHOWN and + dir_fd == DEFAULT_DIR_FD and not follow_symlinks): + os.lchown(path, uid, gid) + elif rposix.HAVE_FCHOWNAT and ( + not follow_symlinks or dir_fd != DEFAULT_DIR_FD): + rposix.fchownat(path, uid, gid, dir_fd, follow_symlinks) + else: + assert follow_symlinks + assert dir_fd == DEFAULT_DIR_FD + os.chown(path, uid, gid) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + @unwrap_spec(path='fsencode', uid=c_uid_t, 
gid=c_gid_t) def lchown(space, path, uid, gid): - """Change the owner and group id of path to the numeric uid and gid. -This function will not follow symbolic links.""" + """lchown(path, uid, gid) + +Change the owner and group id of path to the numeric uid and gid. +This function will not follow symbolic links. +Equivalent to os.chown(path, uid, gid, follow_symlinks=False).""" check_uid_range(space, uid) check_uid_range(space, gid) try: @@ -1350,8 +1838,10 @@ @unwrap_spec(uid=c_uid_t, gid=c_gid_t) def fchown(space, w_fd, uid, gid): - """Change the owner and group id of the file given by file descriptor -fd to the numeric uid and gid.""" + """fchown(fd, uid, gid) + +Change the owner and group id of the file given by file descriptor +fd to the numeric uid and gid. Equivalent to os.chown(fd, uid, gid).""" fd = space.c_filedescriptor_w(w_fd) check_uid_range(space, uid) check_uid_range(space, gid) @@ -1458,11 +1948,71 @@ raise wrap_oserror2(space, e, w_path) return space.wrap(result) + +def chflags(): + """chflags(path, flags, *, follow_symlinks=True) + +Set file flags. + +If follow_symlinks is False, and the last element of the path is a symbolic + link, chflags will change flags on the symbolic link itself instead of the + file the link points to. +follow_symlinks may not be implemented on your platform. If it is +unavailable, using it will raise a NotImplementedError.""" + +def lchflags(): + """lchflags(path, flags) + +Set file flags. +This function will not follow symbolic links. +Equivalent to chflags(path, flags, follow_symlinks=False).""" + +def getxattr(): + """getxattr(path, attribute, *, follow_symlinks=True) -> value + +Return the value of extended attribute attribute on path. + +path may be either a string or an open file descriptor. 
+If follow_symlinks is False, and the last element of the path is a symbolic + link, getxattr will examine the symbolic link itself instead of the file + the link points to.""" + +def setxattr(): + """setxattr(path, attribute, value, flags=0, *, follow_symlinks=True) + +Set extended attribute attribute on path to value. +path may be either a string or an open file descriptor. +If follow_symlinks is False, and the last element of the path is a symbolic + link, setxattr will modify the symbolic link itself instead of the file + the link points to.""" + + +def removexattr(): + """removexattr(path, attribute, *, follow_symlinks=True) + +Remove extended attribute attribute on path. +path may be either a string or an open file descriptor. +If follow_symlinks is False, and the last element of the path is a symbolic + link, removexattr will modify the symbolic link itself instead of the file + the link points to.""" + +def listxattr(): + """listxattr(path='.', *, follow_symlinks=True) + +Return a list of extended attributes on path. + +path may be either None, a string, or an open file descriptor. +if path is None, listxattr will examine the current directory. 
+If follow_symlinks is False, and the last element of the path is a symbolic + link, listxattr will examine the symbolic link itself instead of the file + the link points to.""" + + have_functions = [] for name in """FCHDIR FCHMOD FCHMODAT FCHOWN FCHOWNAT FEXECVE FDOPENDIR FPATHCONF FSTATAT FSTATVFS FTRUNCATE FUTIMENS FUTIMES FUTIMESAT LINKAT LCHFLAGS LCHMOD LCHOWN LSTAT LUTIMES - MKDIRAT MKFIFOAT MKNODAT OPENAT READLINKAT RENAMEAT + MKDIRAT MKFIFOAT MKNODAT OPENAT READLINKAT RENAMEAT SYMLINKAT UNLINKAT UTIMENSAT""".split(): if getattr(rposix, "HAVE_%s" % name): have_functions.append("HAVE_%s" % name) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -1,18 +1,16 @@ - # -*- coding: utf-8 -*- -from __future__ import with_statement -from pypy.objspace.std import StdObjSpace -from rpython.tool.udir import udir -from pypy.tool.pytest.objspace import gettestobjspace -from pypy.conftest import pypydir -from rpython.translator.c.test.test_extfunc import need_sparse_files -from rpython.rlib import rposix import os import py import sys import signal +from rpython.tool.udir import udir +from pypy.tool.pytest.objspace import gettestobjspace +from rpython.translator.c.test.test_extfunc import need_sparse_files +from rpython.rlib import rposix + + def setup_module(mod): usemodules = ['binascii', 'posix', 'signal', 'struct', 'time'] # py3k os.open uses subprocess, requiring the following per platform @@ -46,7 +44,6 @@ # space.call_method(space.getbuiltinmodule('sys'), 'getfilesystemencoding') - GET_POSIX = "(): import %s as m ; return m" % os.name @@ -418,7 +415,6 @@ def test_execv_no_args(self): os = self.posix raises(ValueError, os.execv, "notepad", []) - raises(ValueError, os.execve, "notepad", [], {}) def test_execv_raising2(self): os = self.posix @@ -540,6 +536,9 @@ assert os.stat(path).st_atime > t0 os.utime(path, (int(t0), int(t0))) assert 
int(os.stat(path).st_atime) == int(t0) + t1 = time() + os.utime(path, (int(t1), int(t1))) + assert int(os.stat(path).st_atime) == int(t1) def test_utime_raises(self): os = self.posix @@ -984,6 +983,19 @@ data = f.read() assert data == "who cares?" + # XXX skip test if dir_fd is unsupported + def test_symlink_fd(self): + posix = self.posix + bytes_dir = self.bytes_dir + f = posix.open(bytes_dir, posix.O_RDONLY) + try: + posix.symlink('somefile', 'somelink', dir_fd=f) + assert (posix.readlink(bytes_dir + '/somelink'.encode()) == + 'somefile'.encode()) + finally: + posix.close(f) + posix.unlink(bytes_dir + '/somelink'.encode()) + if hasattr(os, 'ftruncate'): def test_truncate(self): posix = self.posix @@ -1227,38 +1239,6 @@ assert content == b"test" -class TestPexpect(object): - # XXX replace with AppExpectTest class as soon as possible - def setup_class(cls): - try: - import pexpect - except ImportError: - py.test.skip("pexpect not found") - - def _spawn(self, *args, **kwds): - import pexpect - kwds.setdefault('timeout', 600) - print 'SPAWN:', args, kwds - child = pexpect.spawn(*args, maxread=5000, **kwds) - child.logfile = sys.stdout - return child - - def spawn(self, argv): - py_py = py.path.local(pypydir).join('bin', 'pyinteractive.py') - return self._spawn(sys.executable, [str(py_py), '-S'] + argv) - - def test_ttyname(self): - source = py.code.Source(""" - import os, sys - assert os.ttyname(sys.stdin.fileno()) - print('ok!') - """) - f = udir.join("test_ttyname.py") - f.write(source) - child = self.spawn([str(f)]) - child.expect('ok!') - - class AppTestFdVariants: # Tests variant functions which also accept file descriptors, # dir_fd and follow_symlinks. 
From pypy.commits at gmail.com Wed Apr 6 11:49:59 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 06 Apr 2016 08:49:59 -0700 (PDT) Subject: [pypy-commit] pypy py3k: hg merge follow_symlinks Message-ID: <57053027.c711c30a.86149.5830@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83545:5f5d84aa2cd6 Date: 2016-04-06 16:44 +0100 http://bitbucket.org/pypy/pypy/changeset/5f5d84aa2cd6/ Log: hg merge follow_symlinks diff --git a/pypy/module/posix/test/test_interp_posix.py b/pypy/module/posix/test/test_interp_posix.py new file mode 100644 --- /dev/null +++ b/pypy/module/posix/test/test_interp_posix.py @@ -0,0 +1,55 @@ +import sys + +import py +from hypothesis import given +from hypothesis.strategies import integers + +from rpython.tool.udir import udir +from pypy.conftest import pypydir +from pypy.module.posix.interp_posix import convert_seconds + +class TestPexpect(object): + # XXX replace with AppExpectTest class as soon as possible + def setup_class(cls): + try: + import pexpect + except ImportError: + py.test.skip("pexpect not found") + + def _spawn(self, *args, **kwds): + import pexpect + kwds.setdefault('timeout', 600) + print 'SPAWN:', args, kwds + child = pexpect.spawn(*args, maxread=5000, **kwds) + child.logfile = sys.stdout + return child + + def spawn(self, argv): + py_py = py.path.local(pypydir).join('bin', 'pyinteractive.py') + return self._spawn(sys.executable, [str(py_py), '-S'] + argv) + + def test_ttyname(self): + source = py.code.Source(""" + import os, sys + assert os.ttyname(sys.stdin.fileno()) + print('ok!') + """) + f = udir.join("test_ttyname.py") + f.write(source) + child = self.spawn([str(f)]) + child.expect('ok!') + + +def test_convert_seconds_simple(space): + w_time = space.wrap(123.456) + assert convert_seconds(space, w_time) == (123, 456000000) + + at given(s=integers(min_value=-2**30, max_value=2**30), + ns=integers(min_value=0, max_value=10**9)) +def test_convert_seconds_full(space, s, ns): + w_time = space.wrap(s + ns * 
1e-9) + sec, nsec = convert_seconds(space, w_time) + assert 0 <= nsec < 1e9 + MAX_ERR = 1e9 / 2**23 + 1 # nsec has 53 - 30 = 23 bits of precisin + err = (sec * 10**9 + nsec) - (s * 10**9 + ns) + assert -MAX_ERR < err < MAX_ERR From pypy.commits at gmail.com Wed Apr 6 11:49:57 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 06 Apr 2016 08:49:57 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Add missing test file Message-ID: <57053025.90051c0a.85c37.578a@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83544:797a89d6950c Date: 2016-04-06 16:43 +0100 http://bitbucket.org/pypy/pypy/changeset/797a89d6950c/ Log: Add missing test file diff --git a/pypy/module/posix/test/test_interp_posix.py b/pypy/module/posix/test/test_interp_posix.py new file mode 100644 --- /dev/null +++ b/pypy/module/posix/test/test_interp_posix.py @@ -0,0 +1,55 @@ +import sys + +import py +from hypothesis import given +from hypothesis.strategies import integers + +from rpython.tool.udir import udir +from pypy.conftest import pypydir +from pypy.module.posix.interp_posix import convert_seconds + +class TestPexpect(object): + # XXX replace with AppExpectTest class as soon as possible + def setup_class(cls): + try: + import pexpect + except ImportError: + py.test.skip("pexpect not found") + + def _spawn(self, *args, **kwds): + import pexpect + kwds.setdefault('timeout', 600) + print 'SPAWN:', args, kwds + child = pexpect.spawn(*args, maxread=5000, **kwds) + child.logfile = sys.stdout + return child + + def spawn(self, argv): + py_py = py.path.local(pypydir).join('bin', 'pyinteractive.py') + return self._spawn(sys.executable, [str(py_py), '-S'] + argv) + + def test_ttyname(self): + source = py.code.Source(""" + import os, sys + assert os.ttyname(sys.stdin.fileno()) + print('ok!') + """) + f = udir.join("test_ttyname.py") + f.write(source) + child = self.spawn([str(f)]) + child.expect('ok!') + + +def test_convert_seconds_simple(space): + w_time = space.wrap(123.456) + 
assert convert_seconds(space, w_time) == (123, 456000000) + + at given(s=integers(min_value=-2**30, max_value=2**30), + ns=integers(min_value=0, max_value=10**9)) +def test_convert_seconds_full(space, s, ns): + w_time = space.wrap(s + ns * 1e-9) + sec, nsec = convert_seconds(space, w_time) + assert 0 <= nsec < 1e9 + MAX_ERR = 1e9 / 2**23 + 1 # nsec has 53 - 30 = 23 bits of precisin + err = (sec * 10**9 + nsec) - (s * 10**9 + ns) + assert -MAX_ERR < err < MAX_ERR From pypy.commits at gmail.com Wed Apr 6 12:18:24 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 06 Apr 2016 09:18:24 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: hg merge default Message-ID: <570536d0.82bb1c0a.d1b42.ffff8990@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83546:cb511f4d3fb8 Date: 2016-04-06 17:17 +0100 http://bitbucket.org/pypy/pypy/changeset/cb511f4d3fb8/ Log: hg merge default diff too long, truncating to 2000 out of 10351 lines diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -67,7 +67,8 @@ subvalue = subfield.ctype fields[subname] = Field(subname, relpos, subvalue._sizeofinstances(), - subvalue, i, is_bitfield) + subvalue, i, is_bitfield, + inside_anon_field=fields[name]) else: resnames.append(name) names = resnames @@ -77,13 +78,15 @@ class Field(object): - def __init__(self, name, offset, size, ctype, num, is_bitfield): + def __init__(self, name, offset, size, ctype, num, is_bitfield, + inside_anon_field=None): self.__dict__['name'] = name self.__dict__['offset'] = offset self.__dict__['size'] = size self.__dict__['ctype'] = ctype self.__dict__['num'] = num self.__dict__['is_bitfield'] = is_bitfield + self.__dict__['inside_anon_field'] = inside_anon_field def __setattr__(self, name, value): raise AttributeError(name) @@ -95,6 +98,8 @@ def __get__(self, obj, cls=None): if obj is None: return self + if self.inside_anon_field is not None: + return 
getattr(self.inside_anon_field.__get__(obj), self.name) if self.is_bitfield: # bitfield member, use direct access return obj._buffer.__getattr__(self.name) @@ -105,6 +110,9 @@ return fieldtype._CData_output(suba, obj, offset) def __set__(self, obj, value): + if self.inside_anon_field is not None: + setattr(self.inside_anon_field.__get__(obj), self.name, value) + return fieldtype = self.ctype cobj = fieldtype.from_param(value) key = keepalive_key(self.num) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -46,7 +46,6 @@ except detect_cpu.ProcessorAutodetectError: pass - translation_modules = default_modules.copy() translation_modules.update([ "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", "_md5", diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -18,6 +18,7 @@ - ``bytebuffer(length)``: return a new read-write buffer of the given length. It works like a simplified array of characters (actually, depending on the configuration the ``array`` module internally uses this). + - ``attach_gdb()``: start a GDB at the interpreter-level (or a PDB before translation). Transparent Proxy Functionality @@ -37,4 +38,3 @@ -------------------------------------------------------- - ``isfake(obj)``: returns True if ``obj`` is faked. - - ``interp_pdb()``: start a pdb at interpreter-level. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -23,3 +23,20 @@ Implement yet another strange numpy indexing compatibility; indexing by a scalar returns a scalar + +.. branch: fix_transpose_for_list_v3 + +Allow arguments to transpose to be sequences + +.. branch: jit-leaner-frontend + +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation +of traces + +.. 
branch: win32-lib-name + +.. branch: remove-frame-forcing-in-executioncontext + +.. branch: rposix-for-3 + +Wrap more POSIX functions in `rpython.rlib.rposix`. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -240,8 +240,9 @@ "when --shared is on (it is by default). " "See issue #1971.") if sys.platform == 'win32': - config.translation.libname = '..\\..\\libs\\python27.lib' - thisdir.join('..', '..', 'libs').ensure(dir=1) + libdir = thisdir.join('..', '..', 'libs') + libdir.ensure(dir=1) + config.translation.libname = str(libdir.join('python27.lib')) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -291,13 +291,7 @@ return tb def set_traceback(self, traceback): - """Set the current traceback. It should either be a traceback - pointing to some already-escaped frame, or a traceback for the - current frame. To support the latter case we do not mark the - frame as escaped. The idea is that it will be marked as escaping - only if the exception really propagates out of this frame, by - executioncontext.leave() being called with got_exception=True. 
- """ + """Set the current traceback.""" self._application_traceback = traceback diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -90,6 +90,7 @@ 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', 'decode_long' : 'interp_magic.decode_long', + '_promote' : 'interp_magic._promote', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -168,3 +168,23 @@ except InvalidEndiannessError: raise oefmt(space.w_ValueError, "invalid byteorder argument") return space.newlong_from_rbigint(result) + +def _promote(space, w_obj): + """ Promote the first argument of the function and return it. Promote is by + value for ints, floats, strs, unicodes (but not subclasses thereof) and by + reference otherwise. (Unicodes not supported right now.) 
+ + This function is experimental!""" + from rpython.rlib import jit + if space.is_w(space.type(w_obj), space.w_int): + jit.promote(space.int_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_float): + jit.promote(space.float_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_str): + jit.promote_string(space.str_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_unicode): + raise OperationError(space.w_TypeError, space.wrap( + "promoting unicode unsupported")) + else: + jit.promote(w_obj) + return w_obj diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -47,3 +47,16 @@ assert decode_long('\x00\x80', 'little', False) == 32768 assert decode_long('\x00\x80', 'little', True) == -32768 raises(ValueError, decode_long, '', 'foo') + + def test_promote(self): + from __pypy__ import _promote + assert _promote(1) == 1 + assert _promote(1.1) == 1.1 + assert _promote("abc") == "abc" + raises(TypeError, _promote, u"abc") + l = [] + assert _promote(l) is l + class A(object): + pass + a = A() + assert _promote(a) is a diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -285,6 +285,8 @@ from posix import openpty, fdopen, write, close except ImportError: skip('no openpty on this platform') + if 'gnukfreebsd' in sys.platform: + skip('close() hangs forever on kFreeBSD') read_fd, write_fd = openpty() write(write_fd, 'Abc\n') close(write_fd) diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h @@ -10,6 +10,7 @@ #define _CJKCODECS_H_ #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" /* a unicode 
"undefined" codepoint */ diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h new file mode 100644 --- /dev/null +++ b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h @@ -0,0 +1,9 @@ + +/* this is only included from the .c files in this directory: rename + these pypymbc-prefixed names to locally define the CPython names */ +typedef pypymbc_ssize_t Py_ssize_t; +#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +#define Py_UNICODE_SIZE pypymbc_UNICODE_SIZE +typedef pypymbc_wchar_t Py_UNICODE; +typedef pypymbc_ucs4_t ucs4_t; +typedef pypymbc_ucs2_t ucs2_t, DBCHAR; diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c @@ -1,6 +1,7 @@ #include #include #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec) diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h @@ -9,31 +9,28 @@ #include #ifdef _WIN64 -typedef __int64 ssize_t +typedef __int64 pypymbc_ssize_t #elif defined(_WIN32) -typedef int ssize_t; +typedef int pypymbc_ssize_t; #else #include -#endif - -#ifndef Py_UNICODE_SIZE -#ifdef _WIN32 -#define Py_UNICODE_SIZE 2 -#else -#define Py_UNICODE_SIZE 4 -#endif -typedef wchar_t Py_UNICODE; -typedef ssize_t Py_ssize_t; -#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +typedef ssize_t pypymbc_ssize_t; #endif #ifdef _WIN32 -typedef unsigned int ucs4_t; -typedef unsigned short ucs2_t, DBCHAR; +#define pypymbc_UNICODE_SIZE 2 +#else +#define pypymbc_UNICODE_SIZE 4 +#endif +typedef 
wchar_t pypymbc_wchar_t; + +#ifdef _WIN32 +typedef unsigned int pypymbc_ucs4_t; +typedef unsigned short pypymbc_ucs2_t; #else #include -typedef uint32_t ucs4_t; -typedef uint16_t ucs2_t, DBCHAR; +typedef uint32_t pypymbc_ucs4_t; +typedef uint16_t pypymbc_ucs2_t; #endif @@ -42,28 +39,28 @@ void *p; int i; unsigned char c[8]; - ucs2_t u2[4]; - ucs4_t u4[2]; + pypymbc_ucs2_t u2[4]; + pypymbc_ucs4_t u4[2]; } MultibyteCodec_State; typedef int (*mbcodec_init)(const void *config); -typedef Py_ssize_t (*mbencode_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencode_func)(MultibyteCodec_State *state, const void *config, - const Py_UNICODE **inbuf, Py_ssize_t inleft, - unsigned char **outbuf, Py_ssize_t outleft, + const pypymbc_wchar_t **inbuf, pypymbc_ssize_t inleft, + unsigned char **outbuf, pypymbc_ssize_t outleft, int flags); typedef int (*mbencodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, const void *config, - unsigned char **outbuf, Py_ssize_t outleft); -typedef Py_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, + unsigned char **outbuf, pypymbc_ssize_t outleft); +typedef pypymbc_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, const void *config, - const unsigned char **inbuf, Py_ssize_t inleft, - Py_UNICODE **outbuf, Py_ssize_t outleft); + const unsigned char **inbuf, pypymbc_ssize_t inleft, + pypymbc_wchar_t **outbuf, pypymbc_ssize_t outleft); typedef int (*mbdecodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, const void *config); typedef struct MultibyteCodec_s { @@ -94,59 +91,59 @@ const MultibyteCodec *codec; MultibyteCodec_State state; const unsigned char *inbuf_start, *inbuf, *inbuf_end; - Py_UNICODE 
*outbuf_start, *outbuf, *outbuf_end; + pypymbc_wchar_t *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, - char *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, + char *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_dec_free(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_UNICODE *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); +pypymbc_wchar_t *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, - Py_UNICODE *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, + pypymbc_wchar_t *, pypymbc_ssize_t, pypymbc_ssize_t); struct pypy_cjk_enc_s { const MultibyteCodec *codec; MultibyteCodec_State state; - const Py_UNICODE *inbuf_start, *inbuf, *inbuf_end; + const pypymbc_wchar_t *inbuf_start, *inbuf, *inbuf_end; unsigned char *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_enc_s *pypy_cjk_enc_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, - Py_UNICODE *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, + pypymbc_wchar_t *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_enc_free(struct pypy_cjk_enc_s *); RPY_EXTERN 
-Py_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, pypymbc_ssize_t); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); RPY_EXTERN char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, - char *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, + char *, pypymbc_ssize_t, pypymbc_ssize_t); RPY_EXTERN const MultibyteCodec *pypy_cjk_enc_getcodec(struct pypy_cjk_enc_s *); @@ -191,5 +188,7 @@ DEFINE_CODEC(big5) DEFINE_CODEC(cp950) +#undef DEFINE_CODEC + #endif diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -14,7 +14,7 @@ tmpfile2 = open(self.tmpfilename2, 'wb') tmpfileno2 = tmpfile2.fileno() - import struct, sys + import struct, sys, gc WORD = struct.calcsize('l') @@ -46,6 +46,8 @@ return count import _vmprof + gc.collect() # try to make the weakref list deterministic + gc.collect() # by freeing all dead code objects _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() @@ -57,6 +59,8 @@ pass """ in d + gc.collect() + gc.collect() _vmprof.enable(tmpfileno2, 0.01) exec """def foo2(): diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- 
a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -137,7 +137,7 @@ from pypy.module.cpyext.unicodeobject import ( PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) if PyUnicode_Check(space, ref): - ref = _PyUnicode_AsDefaultEncodedString(space, ref, None) + ref = _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO)) else: raise oefmt(space.w_TypeError, "expected string or Unicode object, %T found", @@ -156,7 +156,7 @@ from pypy.module.cpyext.unicodeobject import ( PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) if PyUnicode_Check(space, ref): - ref = _PyUnicode_AsDefaultEncodedString(space, ref, None) + ref = _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO)) else: raise oefmt(space.w_TypeError, "expected string or Unicode object, %T found", diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -90,8 +90,10 @@ self.raises(space, api, IndexError, api.PySequence_SetItem, l, 3, w_value) + t = api.PyTuple_New(1) + api.PyTuple_SetItem(t, 0, l) self.raises(space, api, TypeError, api.PySequence_SetItem, - api.PyTuple_New(1), 0, w_value) + t, 0, w_value) self.raises(space, api, TypeError, api.PySequence_SetItem, space.newdict(), 0, w_value) diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -5,6 +5,7 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import FatalError class TestTupleObject(BaseApiTest): @@ -18,29 +19,44 @@ #assert api.PyTuple_GET_SIZE(atuple) == 3 --- now a C macro raises(TypeError, api.PyTuple_Size(space.newlist([]))) 
api.PyErr_Clear() - + + def test_tuple_realize_refuses_nulls(self, space, api): + py_tuple = api.PyTuple_New(1) + py.test.raises(FatalError, from_ref, space, py_tuple) + def test_tuple_resize(self, space, api): w_42 = space.wrap(42) + w_43 = space.wrap(43) + w_44 = space.wrap(44) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') py_tuple = api.PyTuple_New(3) # inside py_tuple is an array of "PyObject *" items which each hold # a reference rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) ar[0] = py_tuple api._PyTuple_Resize(ar, 2) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 2 assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + assert space.int_w(space.getitem(w_tuple, space.wrap(1))) == 43 api.Py_DecRef(ar[0]) py_tuple = api.PyTuple_New(3) rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[2] = make_ref(space, w_44) ar[0] = py_tuple api._PyTuple_Resize(ar, 10) + assert api.PyTuple_Size(ar[0]) == 10 + for i in range(3, 10): + rffi.cast(PyTupleObject, py_tuple).c_ob_item[i] = make_ref( + space, space.wrap(42 + i)) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 10 - assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + for i in range(10): + assert space.int_w(space.getitem(w_tuple, space.wrap(i))) == 42 + i api.Py_DecRef(ar[0]) lltype.free(ar, flavor='raw') diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import fatalerror_notb from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, 
CANNOT_FAIL, build_type_checkers, PyObjectFields, cpython_struct, bootstrap_function) @@ -91,14 +92,22 @@ def tuple_realize(space, py_obj): """ Creates the tuple in the interpreter. The PyTupleObject must not - be modified after this call. + be modified after this call. We check that it does not contain + any NULLs at this point (which would correspond to half-broken + W_TupleObjects). """ py_tup = rffi.cast(PyTupleObject, py_obj) l = py_tup.c_ob_size p = py_tup.c_ob_item items_w = [None] * l for i in range(l): - items_w[i] = from_ref(space, p[i]) + w_item = from_ref(space, p[i]) + if w_item is None: + fatalerror_notb( + "Fatal error in cpyext, CPython compatibility layer: " + "converting a PyTupleObject into a W_TupleObject, " + "but found NULLs as items") + items_w[i] = w_item w_obj = space.newtuple(items_w) track_reference(space, py_obj, w_obj) return w_obj diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -502,29 +502,34 @@ return W_NDimArray(self.implementation.transpose(self, axes)) def descr_transpose(self, space, args_w): - if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple): - args_w = space.fixedview(args_w[0]) - if (len(args_w) == 0 or - len(args_w) == 1 and space.is_none(args_w[0])): + if len(args_w) == 0 or len(args_w) == 1 and space.is_none(args_w[0]): return self.descr_get_transpose(space) else: - if len(args_w) != self.ndims(): - raise oefmt(space.w_ValueError, "axes don't match array") - axes = [] - axes_seen = [False] * self.ndims() - for w_arg in args_w: - try: - axis = support.index_w(space, w_arg) - except OperationError: - raise oefmt(space.w_TypeError, "an integer is required") - if axis < 0 or axis >= self.ndims(): - raise oefmt(space.w_ValueError, "invalid axis for this array") - if axes_seen[axis] is True: - raise oefmt(space.w_ValueError, "repeated axis in transpose") - axes.append(axis) - axes_seen[axis] = 
True - return self.descr_get_transpose(space, axes) + if len(args_w) > 1: + axes = args_w + else: # Iterable in the only argument (len(arg_w) == 1 and arg_w[0] is not None) + axes = space.fixedview(args_w[0]) + axes = self._checked_axes(axes, space) + return self.descr_get_transpose(space, axes) + + def _checked_axes(self, axes_raw, space): + if len(axes_raw) != self.ndims(): + raise oefmt(space.w_ValueError, "axes don't match array") + axes = [] + axes_seen = [False] * self.ndims() + for elem in axes_raw: + try: + axis = support.index_w(space, elem) + except OperationError: + raise oefmt(space.w_TypeError, "an integer is required") + if axis < 0 or axis >= self.ndims(): + raise oefmt(space.w_ValueError, "invalid axis for this array") + if axes_seen[axis] is True: + raise oefmt(space.w_ValueError, "repeated axis in transpose") + axes.append(axis) + axes_seen[axis] = True + return axes @unwrap_spec(axis1=int, axis2=int) def descr_swapaxes(self, space, axis1, axis2): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2960,6 +2960,36 @@ assert (a.transpose() == b).all() assert (a.transpose(None) == b).all() + def test_transpose_arg_tuple(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose((1, 2, 0)) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_list(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose([1, 2, 0]) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_array(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = 
a.transpose(np.array([1, 2, 0])) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + def test_transpose_error(self): import numpy as np a = np.arange(24).reshape(2, 3, 4) @@ -2968,6 +2998,11 @@ raises(ValueError, a.transpose, 1, 0, 1) raises(TypeError, a.transpose, 1, 0, '2') + def test_transpose_unexpected_argument(self): + import numpy as np + a = np.array([[1, 2], [3, 4], [5, 6]]) + raises(TypeError, 'a.transpose(axes=(1,2,0))') + def test_flatiter(self): from numpy import array, flatiter, arange, zeros a = array([[10, 30], [40, 60]]) diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -79,54 +79,45 @@ else: return _resolve_attr_chain(chain, obj, idx + 1) - -class _simple_attrgetter(object): - def __init__(self, attr): - self._attr = attr +class attrgetter(object): + def __init__(self, attr, *attrs): + if ( + not isinstance(attr, basestring) or + not all(isinstance(a, basestring) for a in attrs) + ): + def _raise_typeerror(obj): + raise TypeError( + "argument must be a string, not %r" % type(attr).__name__ + ) + self._call = _raise_typeerror + elif attrs: + self._multi_attrs = [ + a.split(".") for a in [attr] + list(attrs) + ] + self._call = self._multi_attrgetter + elif "." 
not in attr: + self._simple_attr = attr + self._call = self._simple_attrgetter + else: + self._single_attr = attr.split(".") + self._call = self._single_attrgetter def __call__(self, obj): - return getattr(obj, self._attr) + return self._call(obj) + def _simple_attrgetter(self, obj): + return getattr(obj, self._simple_attr) -class _single_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs + def _single_attrgetter(self, obj): + return _resolve_attr_chain(self._single_attr, obj) - def __call__(self, obj): - return _resolve_attr_chain(self._attrs, obj) - - -class _multi_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs - - def __call__(self, obj): + def _multi_attrgetter(self, obj): return tuple([ _resolve_attr_chain(attrs, obj) - for attrs in self._attrs + for attrs in self._multi_attrs ]) -def attrgetter(attr, *attrs): - if ( - not isinstance(attr, basestring) or - not all(isinstance(a, basestring) for a in attrs) - ): - def _raise_typeerror(obj): - raise TypeError( - "argument must be a string, not %r" % type(attr).__name__ - ) - return _raise_typeerror - if attrs: - return _multi_attrgetter([ - a.split(".") for a in [attr] + list(attrs) - ]) - elif "." 
not in attr: - return _simple_attrgetter(attr) - else: - return _single_attrgetter(attr.split(".")) - - class itemgetter(object): def __init__(self, item, *items): self._single = not bool(items) diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -47,7 +47,13 @@ a.name = "hello" a.child = A() a.child.name = "world" + a.child.foo = "bar" assert attrgetter("child.name")(a) == "world" + assert attrgetter("child.name", "child.foo")(a) == ("world", "bar") + + def test_attrgetter_type(self): + from operator import attrgetter + assert type(attrgetter("child.name")) is attrgetter def test_concat(self): class Seq1: diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py b/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py @@ -57,3 +57,32 @@ assert Y.y.offset == sizeof(c_int) * 2 assert Y._names_ == ['x', 'a', 'b', 'y'] + + def test_anonymous_fields_on_instance(self): + # this is about the *instance-level* access of anonymous fields, + # which you'd guess is the most common, but used not to work + # (issue #2230) + + class B(Structure): + _fields_ = [("x", c_int), ("y", c_int), ("z", c_int)] + class A(Structure): + _anonymous_ = ["b"] + _fields_ = [("b", B)] + + a = A() + a.x = 5 + assert a.x == 5 + assert a.b.x == 5 + a.b.x += 1 + assert a.x == 6 + + class C(Structure): + _anonymous_ = ["a"] + _fields_ = [("v", c_int), ("a", A)] + + c = C() + c.v = 3 + c.y = -8 + assert c.v == 3 + assert c.y == c.a.y == c.a.b.y == -8 + assert not hasattr(c, 'b') diff --git a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py b/pypy/module/test_lib_pypy/pyrepl/infrastructure.py --- a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py +++ b/pypy/module/test_lib_pypy/pyrepl/infrastructure.py @@ -18,6 +18,9 @@ # CONNECTION 
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import print_function +from contextlib import contextmanager +import os + from pyrepl.reader import Reader from pyrepl.console import Console, Event @@ -71,3 +74,14 @@ con = TestConsole(test_spec, verbose=True) reader = reader_class(con) reader.readline() + + + at contextmanager +def sane_term(): + """Ensure a TERM that supports clear""" + old_term, os.environ['TERM'] = os.environ.get('TERM'), 'xterm' + yield + if old_term is not None: + os.environ['TERM'] = old_term + else: + del os.environ['TERM'] diff --git a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py --- a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py @@ -18,7 +18,7 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from pyrepl.historical_reader import HistoricalReader -from .infrastructure import EA, BaseTestReader, read_spec +from .infrastructure import EA, BaseTestReader, sane_term, read_spec # this test case should contain as-verbatim-as-possible versions of # (applicable) bug reports @@ -46,7 +46,8 @@ read_spec(spec, HistoricalTestReader) - at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") + at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") def test_signal_failure(monkeypatch): import os import pty @@ -61,13 +62,14 @@ mfd, sfd = pty.openpty() try: - c = UnixConsole(sfd, sfd) - c.prepare() - c.restore() - monkeypatch.setattr(signal, 'signal', failing_signal) - c.prepare() - monkeypatch.setattr(signal, 'signal', really_failing_signal) - c.restore() + with sane_term(): + c = UnixConsole(sfd, sfd) + c.prepare() + c.restore() + monkeypatch.setattr(signal, 'signal', failing_signal) + c.prepare() + monkeypatch.setattr(signal, 'signal', really_failing_signal) + c.restore() finally: os.close(mfd) os.close(sfd) diff --git 
a/pypy/module/test_lib_pypy/pyrepl/test_readline.py b/pypy/module/test_lib_pypy/pyrepl/test_readline.py --- a/pypy/module/test_lib_pypy/pyrepl/test_readline.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_readline.py @@ -1,7 +1,10 @@ import pytest +from .infrastructure import sane_term - at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") + + at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") def test_raw_input(): import os import pty @@ -11,7 +14,8 @@ readline_wrapper = _ReadlineWrapper(slave, slave) os.write(master, b'input\n') - result = readline_wrapper.get_reader().readline() + with sane_term(): + result = readline_wrapper.get_reader().readline() #result = readline_wrapper.raw_input('prompt:') assert result == 'input' # A bytes string on python2, a unicode string on python3. diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py --- a/pypy/objspace/std/objectobject.py +++ b/pypy/objspace/std/objectobject.py @@ -110,7 +110,7 @@ def descr__init__(space, w_obj, __args__): # don't allow arguments unless __new__ is overridden w_type = space.type(w_obj) - w_parent_new, _ = w_type.lookup_where('__new__') + w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__') if w_parent_new is space.w_object: try: __args__.fixedunpack(0) diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -288,9 +288,11 @@ RPyListPrinter.recursive = True try: itemlist = [] - for i in range(length): + for i in range(min(length, MAX_DISPLAY_LENGTH)): item = items[i] itemlist.append(str(item)) # may recurse here + if length > MAX_DISPLAY_LENGTH: + itemlist.append("...") str_items = ', '.join(itemlist) finally: RPyListPrinter.recursive = False diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ # hypothesis is used for test generation on untranslated jit tests 
hypothesis -enum>=0.4.6 # is a dependency, but old pip does not pick it up enum34>=1.1.2 diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -126,6 +126,9 @@ ChoiceOption("jit_profiler", "integrate profiler support into the JIT", ["off", "oprofile"], default="off"), + ChoiceOption("jit_opencoder_model", "the model limits the maximal length" + " of traces. Use big if you want to go bigger than " + "the default", ["big", "normal"], default="normal"), BoolOption("check_str_without_nul", "Forbid NUL chars in strings in some external function calls", default=False, cmdline=None), diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -77,6 +77,7 @@ for c in s: buf.append(c) buf.append(' ') +rpython_print_item._annenforceargs_ = (str,) def rpython_print_newline(): buf = stdoutbuffer.linebuf diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -939,9 +939,9 @@ op = operations[i] self.mc.mark_op(op) opnum = op.getopnum() - if op.has_no_side_effect() and op not in regalloc.longevity: + if rop.has_no_side_effect(opnum) and op not in regalloc.longevity: regalloc.possibly_free_vars_for_op(op) - elif not we_are_translated() and op.getopnum() == -127: + elif not we_are_translated() and op.getopnum() == rop.FORCE_SPILL: regalloc.prepare_force_spill(op, fcond) else: arglocs = regalloc_operations[opnum](regalloc, op, fcond) @@ -949,7 +949,7 @@ fcond = asm_operations[opnum](self, op, arglocs, regalloc, fcond) assert fcond is not None - if op.is_guard(): + if rop.is_guard(opnum): regalloc.possibly_free_vars(op.getfailargs()) if op.type != 'v': regalloc.possibly_free_var(op) diff --git a/rpython/jit/backend/arm/detect.py 
b/rpython/jit/backend/arm/detect.py --- a/rpython/jit/backend/arm/detect.py +++ b/rpython/jit/backend/arm/detect.py @@ -63,3 +63,44 @@ "falling back to", "ARMv%d" % n) debug_stop("jit-backend-arch") return n + + +# Once we can rely on the availability of glibc >= 2.16, replace this with: +# from rpython.rtyper.lltypesystem import lltype, rffi +# getauxval = rffi.llexternal("getauxval", [lltype.Unsigned], lltype.Unsigned) +def getauxval(type_, filename='/proc/self/auxv'): + fd = os.open(filename, os.O_RDONLY, 0644) + + buf_size = 2048 + struct_size = 8 # 2x uint32 + try: + buf = os.read(fd, buf_size) + finally: + os.close(fd) + + # decode chunks of 8 bytes (a_type, a_val), and + # return the a_val whose a_type corresponds to type_, + # or zero if not found. + i = 0 + while i <= buf_size - struct_size: + # We only support little-endian ARM + a_type = (ord(buf[i]) | + (ord(buf[i+1]) << 8) | + (ord(buf[i+2]) << 16) | + (ord(buf[i+3]) << 24)) + a_val = (ord(buf[i+4]) | + (ord(buf[i+5]) << 8) | + (ord(buf[i+6]) << 16) | + (ord(buf[i+7]) << 24)) + i += struct_size + if a_type == type_: + return a_val + + return 0 + + +def detect_neon(): + AT_HWCAP = 16 + HWCAP_NEON = 1 << 12 + hwcap = getauxval(AT_HWCAP) + return bool(hwcap & HWCAP_NEON) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1092,8 +1092,8 @@ self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) return fcond - # the following five instructions are only ARMv7; - # regalloc.py won't call them at all on ARMv6 + # the following five instructions are only ARMv7 with NEON; + # regalloc.py won't call them at all, in other cases emit_opx_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') emit_opx_llong_sub = gen_emit_float_op('llong_sub', 'VSUB_i64') emit_opx_llong_and = gen_emit_float_op('llong_and', 'VAND_i64') diff --git a/rpython/jit/backend/arm/regalloc.py 
b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -530,7 +530,7 @@ EffectInfo.OS_LLONG_AND, EffectInfo.OS_LLONG_OR, EffectInfo.OS_LLONG_XOR): - if self.cpu.cpuinfo.arch_version >= 7: + if self.cpu.cpuinfo.neon: args = self._prepare_llong_binop_xx(op, fcond) self.perform_extra(op, args, fcond) return diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -7,13 +7,14 @@ from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.jit.backend.arm.detect import detect_hardfloat -from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.backend.arm.detect import detect_arch_version, detect_neon jitframe.STATICSIZE = JITFRAME_FIXED_SIZE class CPUInfo(object): hf_abi = False arch_version = 6 + neon = False class AbstractARMCPU(AbstractLLCPU): @@ -48,6 +49,7 @@ def setup_once(self): self.cpuinfo.arch_version = detect_arch_version() self.cpuinfo.hf_abi = detect_hardfloat() + self.cpuinfo.neon = detect_neon() #self.codemap.setup() self.assembler.setup_once() diff --git a/rpython/jit/backend/arm/test/test_detect.py b/rpython/jit/backend/arm/test/test_detect.py --- a/rpython/jit/backend/arm/test/test_detect.py +++ b/rpython/jit/backend/arm/test/test_detect.py @@ -1,6 +1,6 @@ import py from rpython.tool.udir import udir -from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.backend.arm.detect import detect_arch_version, getauxval cpuinfo = "Processor : ARMv%d-compatible processor rev 7 (v6l)""" cpuinfo2 = """processor : 0 @@ -29,6 +29,19 @@ address sizes : 36 bits physical, 48 bits virtual power management: """ +# From a Marvell Armada 370/XP +auxv = ( + '\x10\x00\x00\x00\xd7\xa8\x1e\x00\x06\x00\x00\x00\x00\x10\x00\x00\x11\x00' + 
'\x00\x00d\x00\x00\x00\x03\x00\x00\x004\x00\x01\x00\x04\x00\x00\x00 \x00' + '\x00\x00\x05\x00\x00\x00\t\x00\x00\x00\x07\x00\x00\x00\x00\xe0\xf3\xb6' + '\x08\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00t\xcf\x04\x00\x0b\x00\x00' + '\x000\x0c\x00\x00\x0c\x00\x00\x000\x0c\x00\x00\r\x00\x00\x000\x0c\x00\x00' + '\x0e\x00\x00\x000\x0c\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x19\x00\x00' + '\x00\x8a\xf3\x87\xbe\x1a\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\xec' + '\xff\x87\xbe\x0f\x00\x00\x00\x9a\xf3\x87\xbe\x00\x00\x00\x00\x00\x00\x00' + '\x00' +) + def write_cpuinfo(info): filepath = udir.join('get_arch_version') @@ -46,3 +59,10 @@ py.test.raises(ValueError, 'detect_arch_version(write_cpuinfo(cpuinfo % 5))') assert detect_arch_version(write_cpuinfo(cpuinfo2)) == 6 + + +def test_getauxval_no_neon(): + path = udir.join('auxv') + path.write(auxv, 'wb') + AT_HWCAP = 16 + assert getauxval(AT_HWCAP, filename=str(path)) == 2009303 diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -146,7 +146,7 @@ MODEL_X86_64: ['floats', 'singlefloats'], MODEL_X86_64_SSE4: ['floats', 'singlefloats'], MODEL_ARM: ['floats', 'singlefloats', 'longlong'], - MODEL_PPC_64: [], # we don't even have PPC directory, so no + MODEL_PPC_64: ['floats'], MODEL_S390_64: ['floats'], }[backend_name] diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -455,7 +455,7 @@ if box is not frame.current_op: value = frame.env[box] else: - value = box.getvalue() # 0 or 0.0 or NULL + value = 0 # box.getvalue() # 0 or 0.0 or NULL else: value = None values.append(value) @@ -472,6 +472,13 @@ # ------------------------------------------------------------ + def setup_descrs(self): + all_descrs = [] + for k, v in self.descrs.iteritems(): + v.descr_index = len(all_descrs) + 
all_descrs.append(v) + return all_descrs + def calldescrof(self, FUNC, ARGS, RESULT, effect_info): key = ('call', getkind(RESULT), tuple([getkind(A) for A in ARGS]), diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -331,7 +331,7 @@ counter = self._register_counter(tp, number, token) c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) operations.append( - ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None)) + ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr])) def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -21,6 +21,30 @@ self._cache_call = {} self._cache_interiorfield = {} + def setup_descrs(self): + all_descrs = [] + for k, v in self._cache_size.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_field.iteritems(): + for k1, v1 in v.iteritems(): + v1.descr_index = len(all_descrs) + all_descrs.append(v1) + for k, v in self._cache_array.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_arraylen.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_call.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_interiorfield.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + assert len(all_descrs) < 2**15 + return all_descrs + def init_size_descr(self, STRUCT, sizedescr): pass diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -316,6 +316,9 @@ 
return ll_frame return execute_token + def setup_descrs(self): + return self.gc_ll_descr.setup_descrs() + # ------------------- helpers and descriptions -------------------- @staticmethod diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -683,7 +683,7 @@ for i in range(len(operations)-1, -1, -1): op = operations[i] if op.type != 'v': - if op not in last_used and op.has_no_side_effect(): + if op not in last_used and rop.has_no_side_effect(op.opnum): continue opnum = op.getopnum() for j in range(op.numargs()): @@ -695,7 +695,7 @@ if opnum != rop.JUMP and opnum != rop.LABEL: if arg not in last_real_usage: last_real_usage[arg] = i - if op.is_guard(): + if rop.is_guard(op.opnum): for arg in op.getfailargs(): if arg is None: # hole continue @@ -732,14 +732,7 @@ return longevity, last_real_usage def is_comparison_or_ovf_op(opnum): - from rpython.jit.metainterp.resoperation import opclasses - cls = opclasses[opnum] - # hack hack: in theory they are instance method, but they don't use - # any instance field, we can use a fake object - class Fake(cls): - pass - op = Fake() - return op.is_comparison() or op.is_ovf() + return rop.is_comparison(opnum) or rop.is_ovf(opnum) def valid_addressing_size(size): return size == 1 or size == 2 or size == 4 or size == 8 diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -103,7 +103,7 @@ orig_op.set_forwarded(op) replaced = True op.setarg(i, arg) - if op.is_guard(): + if rop.is_guard(op.opnum): if not replaced: op = op.copy_and_change(op.getopnum()) orig_op.set_forwarded(op) @@ -212,7 +212,7 @@ # self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) # op.setarg(1, ConstInt(scale)) # op.setarg(2, v_length) - if op.is_getarrayitem() or \ + if 
rop.is_getarrayitem(opnum) or \ opnum in (rop.GETARRAYITEM_RAW_I, rop.GETARRAYITEM_RAW_F): self.handle_getarrayitem(op) @@ -324,13 +324,13 @@ if self.transform_to_gc_load(op): continue # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- - if op.is_malloc(): + if rop.is_malloc(op.opnum): self.handle_malloc_operation(op) continue - if (op.is_guard() or + if (rop.is_guard(op.opnum) or self.could_merge_with_next_guard(op, i, operations)): self.emit_pending_zeros() - elif op.can_malloc(): + elif rop.can_malloc(op.opnum): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: self.emitting_an_operation_that_can_collect() @@ -370,8 +370,8 @@ # return True in cases where the operation and the following guard # should likely remain together. Simplified version of # can_merge_with_next_guard() in llsupport/regalloc.py. - if not op.is_comparison(): - return op.is_ovf() # int_xxx_ovf() / guard_no_overflow() + if not rop.is_comparison(op.opnum): + return rop.is_ovf(op.opnum) # int_xxx_ovf() / guard_no_overflow() if i + 1 >= len(operations): return False next_op = operations[i + 1] @@ -400,7 +400,6 @@ # it's hard to test all cases). Rewrite it away. 
value = int(opnum == rop.GUARD_FALSE) op1 = ResOperation(rop.SAME_AS_I, [ConstInt(value)]) - op1.setint(value) self.emit_op(op1) lst = op.getfailargs()[:] lst[i] = op1 @@ -633,8 +632,7 @@ args = [frame, arglist[jd.index_of_virtualizable]] else: args = [frame] - call_asm = ResOperation(op.getopnum(), args, - op.getdescr()) + call_asm = ResOperation(op.getopnum(), args, descr=op.getdescr()) self.replace_op_with(self.get_box_replacement(op), call_asm) self.emit_op(call_asm) @@ -708,7 +706,7 @@ def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() - op = ResOperation(rop.CALL_MALLOC_GC, args, descr) + op = ResOperation(rop.CALL_MALLOC_GC, args, descr=descr) self.replace_op_with(v_result, op) self.emit_op(op) # In general, don't add v_result to write_barrier_applied: diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py @@ -6,6 +6,7 @@ from rpython.rlib import rthread from rpython.translator.translator import TranslationContext from rpython.jit.backend.detect_cpu import getcpuclass +from rpython.rlib.rweaklist import RWeakListMixin class CompiledVmprofTest(CCompiledMixin): CPUClass = getcpuclass() @@ -21,6 +22,7 @@ class MyCode: _vmprof_unique_id = 0 + _vmprof_weak_list = RWeakListMixin() ; _vmprof_weak_list.initialize() def __init__(self, name): self.name = name diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -286,7 +286,8 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.fprm.position = i - if op.has_no_side_effect() and op not in self.longevity: + opnum = op.opnum + if rop.has_no_side_effect(opnum) and op not in self.longevity: i += 1 
self.possibly_free_vars_for_op(op) continue @@ -298,8 +299,7 @@ else: self.fprm.temp_boxes.append(box) # - opnum = op.getopnum() - if not we_are_translated() and opnum == -127: + if not we_are_translated() and opnum == rop.FORCE_SPILL: self._consider_force_spill(op) else: arglocs = oplist[opnum](self, op) diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.rtyper import rclass from rpython.jit.backend.test import test_random +from rpython.jit.backend.test.test_random import getint, getref_base, getref from rpython.jit.metainterp.resoperation import ResOperation, rop, optypes from rpython.jit.metainterp.history import ConstInt, ConstPtr, getkind from rpython.jit.codewriter import heaptracker @@ -169,7 +170,7 @@ if length == 0: raise test_random.CannotProduceOperation v_index = r.choice(self.intvars) - if not (0 <= v_index.getint() < length): + if not (0 <= getint(v_index) < length): v_index = ConstInt(r.random_integer() % length) return v_index @@ -311,7 +312,7 @@ def field_descr(self, builder, r): v, A = builder.get_structptr_var(r, type=lltype.Array, array_of_structs=True) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) choice = [] for name in A.OF._names: @@ -344,7 +345,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, w], descr) @@ -357,7 +358,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -389,7 +390,7 @@ class 
GetArrayItemOperation(ArrayOperation): def field_descr(self, builder, r): v, A = builder.get_arrayptr_var(r) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) descr = self.array_descr(builder, A) return v, A, v_index, descr @@ -411,7 +412,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(A.OF, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -455,7 +456,7 @@ v_ptr = builder.do(self.opnum, [v_length]) getattr(builder, self.builder_cache).append(v_ptr) # Initialize the string. Is there a better way to do this? - for i in range(v_length.getint()): + for i in range(getint(v_length)): v_index = ConstInt(i) v_char = ConstInt(r.random_integer() % self.max) builder.do(self.set_char, [v_ptr, v_index, v_char]) @@ -471,9 +472,9 @@ current = getattr(builder, self.builder_cache) if current and r.random() < .8: v_string = r.choice(current) - string = v_string.getref(self.ptr) + string = getref(self.ptr, v_string) else: - string = self.alloc(builder.get_index(500, r).getint()) + string = self.alloc(getint(builder.get_index(500, r))) v_string = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, string)) current.append(v_string) for i in range(len(string.chars)): @@ -484,7 +485,7 @@ class AbstractGetItemOperation(AbstractStringOperation): def produce_into(self, builder, r): v_string = self.get_string(builder, r) - v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) builder.do(self.opnum, [v_string, v_index]) class AbstractSetItemOperation(AbstractStringOperation): @@ -492,7 +493,7 @@ v_string = self.get_string(builder, r) if isinstance(v_string, ConstPtr): raise test_random.CannotProduceOperation # setitem(Const, ...) 
- v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) v_target = ConstInt(r.random_integer() % self.max) builder.do(self.opnum, [v_string, v_index, v_target]) @@ -505,15 +506,15 @@ def produce_into(self, builder, r): v_srcstring = self.get_string(builder, r) v_dststring = self.get_string(builder, r) - src = v_srcstring.getref(self.ptr) - dst = v_dststring.getref(self.ptr) + src = getref(self.ptr, v_srcstring) + dst = getref(self.ptr, v_dststring) if src == dst: # because it's not a raise test_random.CannotProduceOperation # memmove(), but memcpy() srclen = len(src.chars) dstlen = len(dst.chars) v_length = builder.get_index(min(srclen, dstlen), r) - v_srcstart = builder.get_index(srclen - v_length.getint() + 1, r) - v_dststart = builder.get_index(dstlen - v_length.getint() + 1, r) + v_srcstart = builder.get_index(srclen - getint(v_length) + 1, r) + v_dststart = builder.get_index(dstlen - getint(v_length) + 1, r) builder.do(self.opnum, [v_srcstring, v_dststring, v_srcstart, v_dststart, v_length]) @@ -585,7 +586,7 @@ """ % funcargs).compile() vtableptr = v._hints['vtable']._as_ptr() d = { - 'ptr': S.getref_base(), + 'ptr': getref_base(S), 'vtable' : vtableptr, 'LLException' : LLException, } diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -11,11 +11,9 @@ from rpython.jit.metainterp.executor import _execute_arglist, wrap_constant from rpython.jit.metainterp.resoperation import opname from rpython.jit.codewriter import longlong -from rpython.rtyper.lltypesystem import lltype, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rstr from rpython.rtyper import rclass -class PleaseRewriteMe(Exception): - pass class DummyLoop(object): def __init__(self, subops): @@ -27,6 +25,41 @@ def execute_raised(self, exc, constant=False): 
self._got_exc = exc + +def getint(v): + if isinstance(v, (ConstInt, InputArgInt)): + return v.getint() + else: + return v._example_int + +def getfloatstorage(v): + if isinstance(v, (ConstFloat, InputArgFloat)): + return v.getfloatstorage() + else: + return v._example_float + +def getfloat(v): + return longlong.getrealfloat(getfloatstorage(v)) + +def getref_base(v): + if isinstance(v, (ConstPtr, InputArgRef)): + return v.getref_base() + else: + return v._example_ref + +def getref(PTR, v): + return lltype.cast_opaque_ptr(PTR, getref_base(v)) + +def constbox(v): + if v.type == INT: + return ConstInt(getint(v)) + if v.type == FLOAT: + return ConstFloat(getfloatstorage(v)) + if v.type == REF: + return ConstPtr(getref_base(v)) + assert 0, v.type + + class OperationBuilder(object): def __init__(self, cpu, loop, vars): self.cpu = cpu @@ -57,11 +90,21 @@ def do(self, opnum, argboxes, descr=None): self.fakemetainterp._got_exc = None op = ResOperation(opnum, argboxes, descr) + argboxes = map(constbox, argboxes) result = _execute_arglist(self.cpu, self.fakemetainterp, opnum, argboxes, descr) if result is not None: - c_result = wrap_constant(result) - op.copy_value_from(c_result) + if lltype.typeOf(result) == lltype.Signed: + op._example_int = result + elif isinstance(result, bool): + op._example_int = int(result) + elif lltype.typeOf(result) == longlong.FLOATSTORAGE: + op._example_float = result + elif isinstance(result, float): + op._example_float = longlong.getfloatstorage(result) + else: + assert lltype.typeOf(result) == llmemory.GCREF + op._example_ref = result self.loop.operations.append(op) return op @@ -101,7 +144,7 @@ if v in names: args.append(names[v]) elif isinstance(v, ConstPtr): - assert not v.getref_base() # otherwise should be in the names + assert not getref_base(v) # otherwise should be in the names args.append('ConstPtr(lltype.nullptr(llmemory.GCREF.TO))') elif isinstance(v, ConstFloat): args.append('ConstFloat(longlong.getfloatstorage(%r))' @@ -198,10 
+241,10 @@ # def writevar(v, nameprefix, init=''): if nameprefix == 'const_ptr': - if not v.getref_base(): + if not getref_base(v): return 'lltype.nullptr(llmemory.GCREF.TO)' - TYPE = v.getref_base()._obj.ORIGTYPE - cont = lltype.cast_opaque_ptr(TYPE, v.getref_base()) + TYPE = getref_base(v)._obj.ORIGTYPE + cont = lltype.cast_opaque_ptr(TYPE, getref_base(v)) if TYPE.TO._is_varsize(): if isinstance(TYPE.TO, lltype.GcStruct): lgt = len(cont.chars) @@ -252,9 +295,9 @@ for i, v in enumerate(self.loop.inputargs): assert not isinstance(v, Const) if v.type == FLOAT: - vals.append("longlong.getfloatstorage(%r)" % v.getfloat()) + vals.append("longlong.getfloatstorage(%r)" % getfloat(v)) else: - vals.append("%r" % v.getint()) + vals.append("%r" % getint(v)) print >>s, ' loop_args = [%s]' % ", ".join(vals) print >>s, ' frame = cpu.execute_token(looptoken, *loop_args)' if self.should_fail_by is None: @@ -264,10 +307,10 @@ for i, v in enumerate(fail_args): if v.type == FLOAT: print >>s, (' assert longlong.getrealfloat(' - 'cpu.get_float_value(frame, %d)) == %r' % (i, v.getfloatstorage())) + 'cpu.get_float_value(frame, %d)) == %r' % (i, getfloatstorage(v))) else: print >>s, (' assert cpu.get_int_value(frame, %d) == %d' - % (i, v.getint())) + % (i, getint(v))) self.names = names s.flush() @@ -295,7 +338,7 @@ builder.intvars.append(v_result) boolres = self.boolres if boolres == 'sometimes': - boolres = v_result.getint() in [0, 1] + boolres = getint(v_result) in [0, 1] if boolres: builder.boolvars.append(v_result) elif v_result.type == FLOAT: @@ -346,10 +389,10 @@ v_second = ConstInt((value & self.and_mask) | self.or_mask) else: v = r.choice(builder.intvars) - v_value = v.getint() + v_value = getint(v) if (v_value & self.and_mask) != v_value: v = builder.do(rop.INT_AND, [v, ConstInt(self.and_mask)]) - v_value = v.getint() + v_value = getint(v) if (v_value | self.or_mask) != v_value: v = builder.do(rop.INT_OR, [v, ConstInt(self.or_mask)]) v_second = v @@ -395,9 +438,9 @@ v_second = 
ConstFloat(r.random_float_storage()) else: v_second = r.choice(builder.floatvars) - if abs(v_first.getfloat()) > 1E100 or abs(v_second.getfloat()) > 1E100: + if abs(getfloat(v_first)) > 1E100 or abs(getfloat(v_second)) > 1E100: raise CannotProduceOperation # avoid infinities - if abs(v_second.getfloat()) < 1E-100: + if abs(getfloat(v_second)) < 1E-100: raise CannotProduceOperation # e.g. division by zero error self.put(builder, [v_first, v_second]) @@ -432,7 +475,7 @@ if not builder.floatvars: raise CannotProduceOperation box = r.choice(builder.floatvars) - if not (-sys.maxint-1 <= box.getfloat() <= sys.maxint): + if not (-sys.maxint-1 <= getfloat(box) <= sys.maxint): raise CannotProduceOperation # would give an overflow self.put(builder, [box]) @@ -440,8 +483,8 @@ def gen_guard(self, builder, r): v = builder.get_bool_var(r) op = ResOperation(self.opnum, [v]) - passing = ((self.opnum == rop.GUARD_TRUE and v.getint()) or - (self.opnum == rop.GUARD_FALSE and not v.getint())) + passing = ((self.opnum == rop.GUARD_TRUE and getint(v)) or + (self.opnum == rop.GUARD_FALSE and not getint(v))) return op, passing def produce_into(self, builder, r): @@ -459,8 +502,8 @@ raise CannotProduceOperation box = r.choice(builder.ptrvars)[0] op = ResOperation(self.opnum, [box]) - passing = ((self.opnum == rop.GUARD_NONNULL and box.getref_base()) or - (self.opnum == rop.GUARD_ISNULL and not box.getref_base())) + passing = ((self.opnum == rop.GUARD_NONNULL and getref_base(box)) or + (self.opnum == rop.GUARD_ISNULL and not getref_base(box))) return op, passing class GuardValueOperation(GuardOperation): @@ -470,14 +513,14 @@ other = r.choice(builder.intvars) else: if r.random() < 0.75: - value = v.getint() + value = getint(v) elif r.random() < 0.5: - value = v.getint() ^ 1 + value = getint(v) ^ 1 else: value = r.random_integer() other = ConstInt(value) op = ResOperation(self.opnum, [v, other]) - return op, (v.getint() == other.getint()) + return op, (getint(v) == getint(other)) # 
____________________________________________________________ @@ -675,7 +718,7 @@ assert not hasattr(loop, '_targettoken') for i in range(position): op = loop.operations[i] - if (not op.has_no_side_effect() + if (not rop.has_no_side_effect(op.opnum) or op.type not in (INT, FLOAT)): position = i break # cannot move the LABEL later @@ -728,9 +771,9 @@ self.expected = {} for v in endvars: if v.type == INT: - self.expected[v] = v.getint() + self.expected[v] = getint(v) elif v.type == FLOAT: - self.expected[v] = v.getfloatstorage() + self.expected[v] = getfloatstorage(v) else: assert 0, v.type @@ -742,7 +785,7 @@ args = [] for box in self.startvars: if box not in self.loop.inputargs: - box = box.constbox() + box = constbox(box) args.append(box) self.cpu.compile_loop(self.loop.inputargs, [ResOperation(rop.JUMP, args, @@ -760,7 +803,7 @@ def clear_state(self): for v, S, fields in self.prebuilt_ptr_consts: - container = v.getref_base()._obj.container + container = getref_base(v)._obj.container for name, value in fields.items(): if isinstance(name, str): setattr(container, name, value) @@ -781,9 +824,9 @@ arguments = [] for box in self.loop.inputargs: if box.type == INT: - arguments.append(box.getint()) + arguments.append(getint(box)) elif box.type == FLOAT: - arguments.append(box.getfloatstorage()) + arguments.append(getfloatstorage(box)) else: assert 0, box.type deadframe = cpu.execute_token(self.runjitcelltoken(), *arguments) @@ -795,7 +838,7 @@ if v not in self.expected: assert v.getopnum() == rop.SAME_AS_I # special case assert isinstance(v.getarg(0), ConstInt) - self.expected[v] = v.getarg(0).getint() + self.expected[v] = getint(v.getarg(0)) if v.type == FLOAT: value = cpu.get_float_value(deadframe, i) else: @@ -807,7 +850,7 @@ ) exc = cpu.grab_exc_value(deadframe) if (self.guard_op is not None and - self.guard_op.is_guard_exception()): + rop.is_guard_exception(self.guard_op.getopnum())): if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION: do_assert(exc, 
"grab_exc_value() should not be %r" % (exc,)) @@ -840,7 +883,7 @@ # generate the branch: a sequence of operations that ends in a FINISH subloop = DummyLoop([]) self.subloops.append(subloop) # keep around for debugging - if guard_op.is_guard_exception(): + if rop.is_guard_exception(guard_op.getopnum()): subloop.operations.append(exc_handling(guard_op)) bridge_builder = self.builder.fork(self.builder.cpu, subloop, op.getfailargs()[:]) @@ -876,9 +919,9 @@ args = [] for x in subset: if x.type == INT: - args.append(InputArgInt(x.getint())) + args.append(InputArgInt(getint(x))) elif x.type == FLOAT: - args.append(InputArgFloat(x.getfloatstorage())) + args.append(InputArgFloat(getfloatstorage(x))) else: assert 0, x.type rl = RandomLoop(self.builder.cpu, self.builder.fork, diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -645,15 +645,28 @@ pass elif gloc is not bloc: self.mov(gloc, bloc) + offset = self.mc.get_relative_pos() self.mc.JMP_l(0) + self.mc.writeimm32(0) self.mc.force_frame_size(DEFAULT_FRAME_BYTES) - offset = self.mc.get_relative_pos() - 4 rawstart = self.materialize_loop(looptoken) - # update the jump to the real trace - self._patch_jump_for_descr(rawstart + offset, asminfo.rawstart) + # update the jump (above) to the real trace + self._patch_jump_to(rawstart + offset, asminfo.rawstart) # update the guard to jump right to this custom piece of assembler self.patch_jump_for_descr(faildescr, rawstart) + def _patch_jump_to(self, adr_jump_offset, adr_new_target): + assert adr_jump_offset != 0 + offset = adr_new_target - (adr_jump_offset + 5) + mc = codebuf.MachineCodeBlockWrapper() + mc.force_frame_size(DEFAULT_FRAME_BYTES) + if rx86.fits_in_32bits(offset): + mc.JMP_l(offset) + else: + mc.MOV_ri(X86_64_SCRATCH_REG.value, adr_new_target) + mc.JMP_r(X86_64_SCRATCH_REG.value) + mc.copy_to_raw_memory(adr_jump_offset) + def 
write_pending_failure_recoveries(self, regalloc): # for each pending guard, generate the code of the recovery stub # at the end of self.mc. @@ -791,10 +804,6 @@ def patch_jump_for_descr(self, faildescr, adr_new_target): adr_jump_offset = faildescr.adr_jump_offset - self._patch_jump_for_descr(adr_jump_offset, adr_new_target) - faildescr.adr_jump_offset = 0 # means "patched" - - def _patch_jump_for_descr(self, adr_jump_offset, adr_new_target): assert adr_jump_offset != 0 offset = adr_new_target - (adr_jump_offset + 4) # If the new target fits within a rel32 of the jump, just patch @@ -815,6 +824,7 @@ p = rffi.cast(rffi.INTP, adr_jump_offset) adr_target = adr_jump_offset + 4 + rffi.cast(lltype.Signed, p[0]) mc.copy_to_raw_memory(adr_target) + faildescr.adr_jump_offset = 0 # means "patched" def fixup_target_tokens(self, rawstart): for targettoken in self.target_tokens_currently_compiling: diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -358,11 +358,11 @@ assert self.assembler.mc._frame_size == DEFAULT_FRAME_BYTES self.rm.position = i self.xrm.position = i - if op.has_no_side_effect() and op not in self.longevity: + if rop.has_no_side_effect(op.opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue - if not we_are_translated() and op.getopnum() == -127: + if not we_are_translated() and op.getopnum() == rop.FORCE_SPILL: self._consider_force_spill(op) else: oplist[op.getopnum()](self, op) diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -33,7 +33,7 @@ def ensure_can_hold_constants(self, asm, op): # allocates 8 bytes in memory for pointers, long integers or floats - if op.is_jit_debug(): + if rop.is_jit_debug(op.getopnum()): return for arg in op.getarglist(): diff --git 
a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -476,7 +476,8 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.fprm.position = i - if op.has_no_side_effect() and op not in self.longevity: + opnum = op.getopnum() + if rop.has_no_side_effect(opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue @@ -488,8 +489,7 @@ else: self.fprm.temp_boxes.append(box) # - opnum = op.getopnum() - if not we_are_translated() and opnum == -127: + if not we_are_translated() and opnum == rop.FORCE_SPILL: self._consider_force_spill(op) else: arglocs = prepare_oplist[opnum](self, op) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -688,6 +688,10 @@ ARRAY = op.args[0].concretetype.TO if self._array_of_voids(ARRAY): return [] + if isinstance(ARRAY, lltype.FixedSizeArray): + raise NotImplementedError( + "%r uses %r, which is not supported by the JIT codewriter" + % (self.graph, ARRAY)) if op.args[0] in self.vable_array_vars: # for virtualizables vars = self.vable_array_vars[op.args[0]] (v_base, arrayfielddescr, arraydescr) = vars @@ -718,6 +722,10 @@ ARRAY = op.args[0].concretetype.TO if self._array_of_voids(ARRAY): return [] + if isinstance(ARRAY, lltype.FixedSizeArray): + raise NotImplementedError( + "%r uses %r, which is not supported by the JIT codewriter" + % (self.graph, ARRAY)) if op.args[0] in self.vable_array_vars: # for virtualizables vars = self.vable_array_vars[op.args[0]] (v_base, arrayfielddescr, arraydescr) = vars diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1316,6 +1316,21 @@ tr = Transformer(None, None) 
py.test.raises(NotImplementedError, tr.rewrite_operation, op) +def test_no_fixedsizearray(): + A = lltype.FixedSizeArray(lltype.Signed, 5) + v_x = varoftype(lltype.Ptr(A)) + op = SpaceOperation('getarrayitem', [v_x, Constant(0, lltype.Signed)], + varoftype(lltype.Signed)) + tr = Transformer(None, None) + tr.graph = 'demo' + py.test.raises(NotImplementedError, tr.rewrite_operation, op) + op = SpaceOperation('setarrayitem', [v_x, Constant(0, lltype.Signed), + Constant(42, lltype.Signed)], + varoftype(lltype.Void)) + e = py.test.raises(NotImplementedError, tr.rewrite_operation, op) + assert str(e.value) == ( + "'demo' uses %r, which is not supported by the JIT codewriter" % (A,)) + def _test_threadlocalref_get(loop_inv): from rpython.rlib.rthread import ThreadLocalField tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_', diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1585,7 +1585,6 @@ def _done_with_this_frame(self): # rare case: we only get there if the blackhole interps all returned # normally (in general we get a ContinueRunningNormally exception). 
From pypy.commits at gmail.com Wed Apr 6 12:21:43 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 06 Apr 2016 09:21:43 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: hg merge py3k Message-ID: <57053797.8673c20a.c9221.60ee@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83547:f19923c93568 Date: 2016-04-06 17:21 +0100 http://bitbucket.org/pypy/pypy/changeset/f19923c93568/ Log: hg merge py3k diff too long, truncating to 2000 out of 10581 lines diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -67,7 +67,8 @@ subvalue = subfield.ctype fields[subname] = Field(subname, relpos, subvalue._sizeofinstances(), - subvalue, i, is_bitfield) + subvalue, i, is_bitfield, + inside_anon_field=fields[name]) else: resnames.append(name) names = resnames @@ -77,13 +78,15 @@ class Field(object): - def __init__(self, name, offset, size, ctype, num, is_bitfield): + def __init__(self, name, offset, size, ctype, num, is_bitfield, + inside_anon_field=None): self.__dict__['name'] = name self.__dict__['offset'] = offset self.__dict__['size'] = size self.__dict__['ctype'] = ctype self.__dict__['num'] = num self.__dict__['is_bitfield'] = is_bitfield + self.__dict__['inside_anon_field'] = inside_anon_field def __setattr__(self, name, value): raise AttributeError(name) @@ -95,6 +98,8 @@ def __get__(self, obj, cls=None): if obj is None: return self + if self.inside_anon_field is not None: + return getattr(self.inside_anon_field.__get__(obj), self.name) if self.is_bitfield: # bitfield member, use direct access return obj._buffer.__getattr__(self.name) @@ -105,6 +110,9 @@ return fieldtype._CData_output(suba, obj, offset) def __set__(self, obj, value): + if self.inside_anon_field is not None: + setattr(self.inside_anon_field.__get__(obj), self.name, value) + return fieldtype = self.ctype cobj = fieldtype.from_param(value) key = keepalive_key(self.num) diff --git 
a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -48,7 +48,6 @@ except detect_cpu.ProcessorAutodetectError: pass - translation_modules = default_modules.copy() translation_modules.update([ "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -18,6 +18,7 @@ - ``bytebuffer(length)``: return a new read-write buffer of the given length. It works like a simplified array of characters (actually, depending on the configuration the ``array`` module internally uses this). + - ``attach_gdb()``: start a GDB at the interpreter-level (or a PDB before translation). Transparent Proxy Functionality @@ -37,4 +38,3 @@ -------------------------------------------------------- - ``isfake(obj)``: returns True if ``obj`` is faked. - - ``interp_pdb()``: start a pdb at interpreter-level. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -23,3 +23,20 @@ Implement yet another strange numpy indexing compatibility; indexing by a scalar returns a scalar + +.. branch: fix_transpose_for_list_v3 + +Allow arguments to transpose to be sequences + +.. branch: jit-leaner-frontend + +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation +of traces + +.. branch: win32-lib-name + +.. branch: remove-frame-forcing-in-executioncontext + +.. branch: rposix-for-3 + +Wrap more POSIX functions in `rpython.rlib.rposix`. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -247,8 +247,9 @@ "when --shared is on (it is by default). 
" "See issue #1971.") if sys.platform == 'win32': - config.translation.libname = '..\\..\\libs\\python27.lib' - thisdir.join('..', '..', 'libs').ensure(dir=1) + libdir = thisdir.join('..', '..', 'libs') + libdir.ensure(dir=1) + config.translation.libname = str(libdir.join('python27.lib')) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -316,13 +316,7 @@ return tb def set_traceback(self, traceback): - """Set the current traceback. It should either be a traceback - pointing to some already-escaped frame, or a traceback for the - current frame. To support the latter case we do not mark the - frame as escaped. The idea is that it will be marked as escaping - only if the exception really propagates out of this frame, by - executioncontext.leave() being called with got_exception=True. - """ + """Set the current traceback.""" self._application_traceback = traceback def remove_traceback_module_frames(self, module_name): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -88,6 +88,7 @@ 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', 'decode_long' : 'interp_magic.decode_long', + '_promote' : 'interp_magic._promote', 'normalize_exc' : 'interp_magic.normalize_exc', 'StdErrPrinter' : 'interp_stderrprinter.W_StdErrPrinter', } diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -148,6 +148,26 @@ raise oefmt(space.w_ValueError, "invalid byteorder argument") return space.newlong_from_rbigint(result) +def _promote(space, w_obj): + """ Promote the first argument of the function and return it. 
Promote is by + value for ints, floats, strs, unicodes (but not subclasses thereof) and by + reference otherwise. (Unicodes not supported right now.) + + This function is experimental!""" + from rpython.rlib import jit + if space.is_w(space.type(w_obj), space.w_int): + jit.promote(space.int_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_float): + jit.promote(space.float_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_str): + jit.promote_string(space.str_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_unicode): + raise OperationError(space.w_TypeError, space.wrap( + "promoting unicode unsupported")) + else: + jit.promote(w_obj) + return w_obj + @unwrap_spec(w_value=WrappedDefault(None), w_tb=WrappedDefault(None)) def normalize_exc(space, w_type, w_value=None, w_tb=None): operr = OperationError(w_type, w_value, w_tb) diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -51,3 +51,16 @@ assert decode_long(b'\x00\x80', 'little', False) == 32768 assert decode_long(b'\x00\x80', 'little', True) == -32768 raises(ValueError, decode_long, '', 'foo') + + def test_promote(self): + from __pypy__ import _promote + assert _promote(1) == 1 + assert _promote(1.1) == 1.1 + assert _promote("abc") == "abc" + raises(TypeError, _promote, u"abc") + l = [] + assert _promote(l) is l + class A(object): + pass + a = A() + assert _promote(a) is a diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h @@ -10,6 +10,7 @@ #define _CJKCODECS_H_ #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" /* a unicode "undefined" codepoint */ diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h 
b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h new file mode 100644 --- /dev/null +++ b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h @@ -0,0 +1,9 @@ + +/* this is only included from the .c files in this directory: rename + these pypymbc-prefixed names to locally define the CPython names */ +typedef pypymbc_ssize_t Py_ssize_t; +#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +#define Py_UNICODE_SIZE pypymbc_UNICODE_SIZE +typedef pypymbc_wchar_t Py_UNICODE; +typedef pypymbc_ucs4_t ucs4_t; +typedef pypymbc_ucs2_t ucs2_t, DBCHAR; diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c @@ -1,6 +1,7 @@ #include #include #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec) diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h @@ -9,31 +9,28 @@ #include #ifdef _WIN64 -typedef __int64 ssize_t +typedef __int64 pypymbc_ssize_t #elif defined(_WIN32) -typedef int ssize_t; +typedef int pypymbc_ssize_t; #else #include -#endif - -#ifndef Py_UNICODE_SIZE -#ifdef _WIN32 -#define Py_UNICODE_SIZE 2 -#else -#define Py_UNICODE_SIZE 4 -#endif -typedef wchar_t Py_UNICODE; -typedef ssize_t Py_ssize_t; -#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +typedef ssize_t pypymbc_ssize_t; #endif #ifdef _WIN32 -typedef unsigned int ucs4_t; -typedef unsigned short ucs2_t, DBCHAR; +#define pypymbc_UNICODE_SIZE 2 +#else +#define pypymbc_UNICODE_SIZE 4 +#endif +typedef wchar_t pypymbc_wchar_t; + +#ifdef _WIN32 +typedef unsigned int pypymbc_ucs4_t; +typedef 
unsigned short pypymbc_ucs2_t; #else #include -typedef uint32_t ucs4_t; -typedef uint16_t ucs2_t, DBCHAR; +typedef uint32_t pypymbc_ucs4_t; +typedef uint16_t pypymbc_ucs2_t; #endif @@ -42,28 +39,28 @@ void *p; int i; unsigned char c[8]; - ucs2_t u2[4]; - ucs4_t u4[2]; + pypymbc_ucs2_t u2[4]; + pypymbc_ucs4_t u4[2]; } MultibyteCodec_State; typedef int (*mbcodec_init)(const void *config); -typedef Py_ssize_t (*mbencode_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencode_func)(MultibyteCodec_State *state, const void *config, - const Py_UNICODE **inbuf, Py_ssize_t inleft, - unsigned char **outbuf, Py_ssize_t outleft, + const pypymbc_wchar_t **inbuf, pypymbc_ssize_t inleft, + unsigned char **outbuf, pypymbc_ssize_t outleft, int flags); typedef int (*mbencodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, const void *config, - unsigned char **outbuf, Py_ssize_t outleft); -typedef Py_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, + unsigned char **outbuf, pypymbc_ssize_t outleft); +typedef pypymbc_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, const void *config, - const unsigned char **inbuf, Py_ssize_t inleft, - Py_UNICODE **outbuf, Py_ssize_t outleft); + const unsigned char **inbuf, pypymbc_ssize_t inleft, + pypymbc_wchar_t **outbuf, pypymbc_ssize_t outleft); typedef int (*mbdecodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, const void *config); typedef struct MultibyteCodec_s { @@ -94,59 +91,59 @@ const MultibyteCodec *codec; MultibyteCodec_State state; const unsigned char *inbuf_start, *inbuf, *inbuf_end; - Py_UNICODE *outbuf_start, *outbuf, *outbuf_end; + pypymbc_wchar_t *outbuf_start, *outbuf, *outbuf_end; }; 
RPY_EXTERN struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, - char *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, + char *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_dec_free(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_UNICODE *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); +pypymbc_wchar_t *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, - Py_UNICODE *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, + pypymbc_wchar_t *, pypymbc_ssize_t, pypymbc_ssize_t); struct pypy_cjk_enc_s { const MultibyteCodec *codec; MultibyteCodec_State state; - const Py_UNICODE *inbuf_start, *inbuf, *inbuf_end; + const pypymbc_wchar_t *inbuf_start, *inbuf, *inbuf_end; unsigned char *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_enc_s *pypy_cjk_enc_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, - Py_UNICODE *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, + pypymbc_wchar_t *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_enc_free(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, Py_ssize_t); +pypymbc_ssize_t 
pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, pypymbc_ssize_t); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); RPY_EXTERN char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, - char *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, + char *, pypymbc_ssize_t, pypymbc_ssize_t); RPY_EXTERN const MultibyteCodec *pypy_cjk_enc_getcodec(struct pypy_cjk_enc_s *); @@ -191,5 +188,7 @@ DEFINE_CODEC(big5) DEFINE_CODEC(cp950) +#undef DEFINE_CODEC + #endif diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -14,7 +14,7 @@ tmpfile2 = open(self.tmpfilename2, 'wb') tmpfileno2 = tmpfile2.fileno() - import struct, sys + import struct, sys, gc WORD = struct.calcsize('l') @@ -46,6 +46,8 @@ return count import _vmprof + gc.collect() # try to make the weakref list deterministic + gc.collect() # by freeing all dead code objects _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() @@ -60,6 +62,8 @@ pass """, d) + gc.collect() + gc.collect() _vmprof.enable(tmpfileno2, 0.01) exec_("""def foo2(): diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ 
b/pypy/module/cpyext/test/test_arraymodule.py @@ -54,16 +54,16 @@ import sys module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) - buf = buffer(arr) + buf = memoryview(arr) exc = raises(TypeError, "buf[1] = '1'") - assert str(exc.value) == "buffer is read-only" + assert str(exc.value) == "cannot modify read-only memory" if sys.byteorder == 'big': - assert str(buf) == (b'\0\0\0\x01' + assert bytes(buf) == (b'\0\0\0\x01' b'\0\0\0\x02' b'\0\0\0\x03' b'\0\0\0\x04') else: - assert str(buf) == (b'\x01\0\0\0' + assert bytes(buf) == (b'\x01\0\0\0' b'\x02\0\0\0' b'\x03\0\0\0' b'\x04\0\0\0') diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -90,8 +90,10 @@ self.raises(space, api, IndexError, api.PySequence_SetItem, l, 3, w_value) + t = api.PyTuple_New(1) + api.PyTuple_SetItem(t, 0, l) self.raises(space, api, TypeError, api.PySequence_SetItem, - api.PyTuple_New(1), 0, w_value) + t, 0, w_value) self.raises(space, api, TypeError, api.PySequence_SetItem, space.newdict(), 0, w_value) diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -5,6 +5,7 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import FatalError class TestTupleObject(BaseApiTest): @@ -18,29 +19,44 @@ #assert api.PyTuple_GET_SIZE(atuple) == 3 --- now a C macro raises(TypeError, api.PyTuple_Size(space.newlist([]))) api.PyErr_Clear() - + + def test_tuple_realize_refuses_nulls(self, space, api): + py_tuple = api.PyTuple_New(1) + py.test.raises(FatalError, from_ref, space, py_tuple) + def test_tuple_resize(self, space, api): w_42 = 
space.wrap(42) + w_43 = space.wrap(43) + w_44 = space.wrap(44) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') py_tuple = api.PyTuple_New(3) # inside py_tuple is an array of "PyObject *" items which each hold # a reference rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) ar[0] = py_tuple api._PyTuple_Resize(ar, 2) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 2 assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + assert space.int_w(space.getitem(w_tuple, space.wrap(1))) == 43 api.Py_DecRef(ar[0]) py_tuple = api.PyTuple_New(3) rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[2] = make_ref(space, w_44) ar[0] = py_tuple api._PyTuple_Resize(ar, 10) + assert api.PyTuple_Size(ar[0]) == 10 + for i in range(3, 10): + rffi.cast(PyTupleObject, py_tuple).c_ob_item[i] = make_ref( + space, space.wrap(42 + i)) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 10 - assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + for i in range(10): + assert space.int_w(space.getitem(w_tuple, space.wrap(i))) == 42 + i api.Py_DecRef(ar[0]) lltype.free(ar, flavor='raw') diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import fatalerror_notb from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL, build_type_checkers, PyObjectFields, cpython_struct, bootstrap_function) @@ -91,14 +92,22 @@ def tuple_realize(space, py_obj): """ Creates the tuple in the interpreter. 
The PyTupleObject must not - be modified after this call. + be modified after this call. We check that it does not contain + any NULLs at this point (which would correspond to half-broken + W_TupleObjects). """ py_tup = rffi.cast(PyTupleObject, py_obj) l = py_tup.c_ob_size p = py_tup.c_ob_item items_w = [None] * l for i in range(l): - items_w[i] = from_ref(space, p[i]) + w_item = from_ref(space, p[i]) + if w_item is None: + fatalerror_notb( + "Fatal error in cpyext, CPython compatibility layer: " + "converting a PyTupleObject into a W_TupleObject, " + "but found NULLs as items") + items_w[i] = w_item w_obj = space.newtuple(items_w) track_reference(space, py_obj, w_obj) return w_obj diff --git a/pypy/module/imp/__init__.py b/pypy/module/imp/__init__.py --- a/pypy/module/imp/__init__.py +++ b/pypy/module/imp/__init__.py @@ -8,35 +8,22 @@ applevel_name = '_imp' interpleveldefs = { - 'SEARCH_ERROR': 'space.wrap(importing.SEARCH_ERROR)', - 'PY_SOURCE': 'space.wrap(importing.PY_SOURCE)', - 'PY_COMPILED': 'space.wrap(importing.PY_COMPILED)', - 'C_EXTENSION': 'space.wrap(importing.C_EXTENSION)', - 'PKG_DIRECTORY': 'space.wrap(importing.PKG_DIRECTORY)', - 'C_BUILTIN': 'space.wrap(importing.C_BUILTIN)', - 'PY_FROZEN': 'space.wrap(importing.PY_FROZEN)', - 'IMP_HOOK': 'space.wrap(importing.IMP_HOOK)', - 'get_suffixes': 'interp_imp.get_suffixes', 'extension_suffixes': 'interp_imp.extension_suffixes', 'get_magic': 'interp_imp.get_magic', 'get_tag': 'interp_imp.get_tag', 'load_dynamic': 'interp_imp.load_dynamic', - 'new_module': 'interp_imp.new_module', 'init_builtin': 'interp_imp.init_builtin', 'init_frozen': 'interp_imp.init_frozen', 'is_builtin': 'interp_imp.is_builtin', 'is_frozen': 'interp_imp.is_frozen', 'get_frozen_object': 'interp_imp.get_frozen_object', 'is_frozen_package': 'interp_imp.is_frozen_package', - 'NullImporter': 'importing.W_NullImporter', 'lock_held': 'interp_imp.lock_held', 'acquire_lock': 'interp_imp.acquire_lock', 'release_lock': 'interp_imp.release_lock', 
- 'cache_from_source': 'interp_imp.cache_from_source', - 'source_from_cache': 'interp_imp.source_from_cache', '_fix_co_filename': 'interp_imp.fix_co_filename', } diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -20,17 +20,6 @@ _WIN32 = sys.platform == 'win32' -SEARCH_ERROR = 0 -PY_SOURCE = 1 -PY_COMPILED = 2 -C_EXTENSION = 3 -# PY_RESOURCE = 4 -PKG_DIRECTORY = 5 -C_BUILTIN = 6 -PY_FROZEN = 7 -# PY_CODERESOURCE = 8 -IMP_HOOK = 9 - SO = '.pyd' if _WIN32 else '.so' PREFIX = 'pypy3-' DEFAULT_SOABI = '%s%d%d' % ((PREFIX,) + PYPY_VERSION[:2]) @@ -104,40 +93,6 @@ def as_unicode(self): return self.path -class W_NullImporter(W_Root): - def __init__(self, space): - pass - - def descr_init(self, space, w_path): - self._descr_init(space, w_path, _WIN32) - - @specialize.arg(3) - def _descr_init(self, space, w_path, win32): - path = space.unicode0_w(w_path) if win32 else space.fsencode_w(w_path) - if not path: - raise OperationError(space.w_ImportError, space.wrap( - "empty pathname")) - - # Directory should not exist - try: - st = rposix_stat.stat(_WIN32Path(path) if win32 else path) - except OSError: - pass - else: - if stat.S_ISDIR(st.st_mode): - raise OperationError(space.w_ImportError, space.wrap( - "existing directory")) - - def find_module_w(self, space, __args__): - return space.wrap(None) - -W_NullImporter.typedef = TypeDef( - 'imp.NullImporter', - __new__=generic_new_descr(W_NullImporter), - __init__=interp2app(W_NullImporter.descr_init), - find_module=interp2app(W_NullImporter.find_module_w), - ) - def _prepare_module(space, w_mod, filename, pkgdir): w = space.wrap space.sys.setmodule(w_mod) diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -12,19 +12,6 @@ from pypy.interpreter.streamutil import wrap_streamerror -def get_suffixes(space): - w = space.wrap - suffixes_w = [] - 
if importing.has_so_extension(space): - suffixes_w.append( - space.newtuple([w(importing.get_so_extension(space)), - w('rb'), w(importing.C_EXTENSION)])) - suffixes_w.extend([ - space.newtuple([w('.py'), w('U'), w(importing.PY_SOURCE)]), - space.newtuple([w('.pyc'), w('rb'), w(importing.PY_COMPILED)]), - ]) - return space.newlist(suffixes_w) - def extension_suffixes(space): suffixes_w = [] if space.config.objspace.usemodules.cpyext: @@ -77,9 +64,6 @@ return importing.check_sys_modules(space, w_modulename) -def new_module(space, w_name): - return space.wrap(Module(space, w_name, add_package=False)) - def init_builtin(space, w_name): name = space.str0_w(w_name) if name not in space.builtin_modules: @@ -135,34 +119,6 @@ importing.getimportlock(space).reinit_lock() @unwrap_spec(pathname='fsencode') -def cache_from_source(space, pathname, w_debug_override=None): - """cache_from_source(path, [debug_override]) -> path - Given the path to a .py file, return the path to its .pyc/.pyo file. - - The .py file does not need to exist; this simply returns the path to the - .pyc/.pyo file calculated as if the .py file were imported. The extension - will be .pyc unless __debug__ is not defined, then it will be .pyo. - - If debug_override is not None, then it must be a boolean and is taken as - the value of __debug__ instead.""" - return space.fsdecode(space.wrapbytes( - importing.make_compiled_pathname(pathname))) - - at unwrap_spec(pathname='fsencode') -def source_from_cache(space, pathname): - """source_from_cache(path) -> path - Given the path to a .pyc./.pyo file, return the path to its .py file. - - The .pyc/.pyo file does not need to exist; this simply returns the path to - the .py file calculated to correspond to the .pyc/.pyo file. 
If path - does not conform to PEP 3147 format, ValueError will be raised.""" - sourcename = importing.make_source_pathname(pathname) - if sourcename is None: - raise oefmt(space.w_ValueError, - "Not a PEP 3147 pyc path: %s", pathname) - return space.fsdecode(space.wrapbytes(sourcename)) - - at unwrap_spec(pathname='fsencode') def fix_co_filename(space, w_code, pathname): code_w = space.interp_w(PyCode, w_code) importing.update_code_filenames(space, code_w, pathname) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -502,29 +502,34 @@ return W_NDimArray(self.implementation.transpose(self, axes)) def descr_transpose(self, space, args_w): - if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple): - args_w = space.fixedview(args_w[0]) - if (len(args_w) == 0 or - len(args_w) == 1 and space.is_none(args_w[0])): + if len(args_w) == 0 or len(args_w) == 1 and space.is_none(args_w[0]): return self.descr_get_transpose(space) else: - if len(args_w) != self.ndims(): - raise oefmt(space.w_ValueError, "axes don't match array") - axes = [] - axes_seen = [False] * self.ndims() - for w_arg in args_w: - try: - axis = support.index_w(space, w_arg) - except OperationError: - raise oefmt(space.w_TypeError, "an integer is required") - if axis < 0 or axis >= self.ndims(): - raise oefmt(space.w_ValueError, "invalid axis for this array") - if axes_seen[axis] is True: - raise oefmt(space.w_ValueError, "repeated axis in transpose") - axes.append(axis) - axes_seen[axis] = True - return self.descr_get_transpose(space, axes) + if len(args_w) > 1: + axes = args_w + else: # Iterable in the only argument (len(arg_w) == 1 and arg_w[0] is not None) + axes = space.fixedview(args_w[0]) + axes = self._checked_axes(axes, space) + return self.descr_get_transpose(space, axes) + + def _checked_axes(self, axes_raw, space): + if len(axes_raw) != self.ndims(): + raise 
oefmt(space.w_ValueError, "axes don't match array") + axes = [] + axes_seen = [False] * self.ndims() + for elem in axes_raw: + try: + axis = support.index_w(space, elem) + except OperationError: + raise oefmt(space.w_TypeError, "an integer is required") + if axis < 0 or axis >= self.ndims(): + raise oefmt(space.w_ValueError, "invalid axis for this array") + if axes_seen[axis] is True: + raise oefmt(space.w_ValueError, "repeated axis in transpose") + axes.append(axis) + axes_seen[axis] = True + return axes @unwrap_spec(axis1=int, axis2=int) def descr_swapaxes(self, space, axis1, axis2): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2951,6 +2951,36 @@ assert (a.transpose() == b).all() assert (a.transpose(None) == b).all() + def test_transpose_arg_tuple(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose((1, 2, 0)) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_list(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose([1, 2, 0]) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_array(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose(np.array([1, 2, 0])) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + def test_transpose_error(self): import numpy as np a = np.arange(24).reshape(2, 3, 4) @@ -2959,6 +2989,11 @@ raises(ValueError, a.transpose, 1, 0, 1) raises(TypeError, a.transpose, 1, 0, '2') + def test_transpose_unexpected_argument(self): + import numpy as np + a = 
np.array([[1, 2], [3, 4], [5, 6]]) + raises(TypeError, 'a.transpose(axes=(1,2,0))') + def test_flatiter(self): from numpy import array, flatiter, arange, zeros a = array([[10, 30], [40, 60]]) diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -23,51 +23,42 @@ else: return _resolve_attr_chain(chain, obj, idx + 1) - -class _simple_attrgetter(object): - def __init__(self, attr): - self._attr = attr +class attrgetter(object): + def __init__(self, attr, *attrs): + if ( + not isinstance(attr, str) or + not all(isinstance(a, str) for a in attrs) + ): + raise TypeError("attribute name must be a string, not %r" % + type(attr).__name__) + elif attrs: + self._multi_attrs = [ + a.split(".") for a in [attr] + list(attrs) + ] + self._call = self._multi_attrgetter + elif "." not in attr: + self._simple_attr = attr + self._call = self._simple_attrgetter + else: + self._single_attr = attr.split(".") + self._call = self._single_attrgetter def __call__(self, obj): - return getattr(obj, self._attr) + return self._call(obj) + def _simple_attrgetter(self, obj): + return getattr(obj, self._simple_attr) -class _single_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs + def _single_attrgetter(self, obj): + return _resolve_attr_chain(self._single_attr, obj) - def __call__(self, obj): - return _resolve_attr_chain(self._attrs, obj) - - -class _multi_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs - - def __call__(self, obj): + def _multi_attrgetter(self, obj): return tuple([ _resolve_attr_chain(attrs, obj) - for attrs in self._attrs + for attrs in self._multi_attrs ]) -def attrgetter(attr, *attrs): - if ( - not isinstance(attr, str) or - not all(isinstance(a, str) for a in attrs) - ): - raise TypeError("attribute name must be a string, not %r" % - type(attr).__name__) - if attrs: - return _multi_attrgetter([ - a.split(".") 
for a in [attr] + list(attrs) - ]) - elif "." not in attr: - return _simple_attrgetter(attr) - else: - return _single_attrgetter(attr.split(".")) - - class itemgetter(object): def __init__(self, item, *items): self._single = not bool(items) diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -50,7 +50,13 @@ a.name = "hello" a.child = A() a.child.name = "world" + a.child.foo = "bar" assert attrgetter("child.name")(a) == "world" + assert attrgetter("child.name", "child.foo")(a) == ("world", "bar") + + def test_attrgetter_type(self): + from operator import attrgetter + assert type(attrgetter("child.name")) is attrgetter def test_concat(self): class Seq1: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1164,7 +1164,6 @@ os._exit(status) def execv(space, w_path, w_args): - """ execv(path, args) Execute an executable path with arguments, replacing current process. 
diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -329,6 +329,10 @@ "usemodules": ["select", "_socket", "time", "thread"], } + import os + if os.uname()[4] == 's390x': + py.test.skip("build bot for s390x cannot open sockets") + def w_make_server(self): import socket if hasattr(self, 'sock'): diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py b/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py @@ -57,3 +57,32 @@ assert Y.y.offset == sizeof(c_int) * 2 assert Y._names_ == ['x', 'a', 'b', 'y'] + + def test_anonymous_fields_on_instance(self): + # this is about the *instance-level* access of anonymous fields, + # which you'd guess is the most common, but used not to work + # (issue #2230) + + class B(Structure): + _fields_ = [("x", c_int), ("y", c_int), ("z", c_int)] + class A(Structure): + _anonymous_ = ["b"] + _fields_ = [("b", B)] + + a = A() + a.x = 5 + assert a.x == 5 + assert a.b.x == 5 + a.b.x += 1 + assert a.x == 6 + + class C(Structure): + _anonymous_ = ["a"] + _fields_ = [("v", c_int), ("a", A)] + + c = C() + c.v = 3 + c.y = -8 + assert c.v == 3 + assert c.y == c.a.y == c.a.b.y == -8 + assert not hasattr(c, 'b') diff --git a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py b/pypy/module/test_lib_pypy/pyrepl/infrastructure.py --- a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py +++ b/pypy/module/test_lib_pypy/pyrepl/infrastructure.py @@ -18,6 +18,9 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
from __future__ import print_function +from contextlib import contextmanager +import os + from pyrepl.reader import Reader from pyrepl.console import Console, Event @@ -71,3 +74,14 @@ con = TestConsole(test_spec, verbose=True) reader = reader_class(con) reader.readline() + + + at contextmanager +def sane_term(): + """Ensure a TERM that supports clear""" + old_term, os.environ['TERM'] = os.environ.get('TERM'), 'xterm' + yield + if old_term is not None: + os.environ['TERM'] = old_term + else: + del os.environ['TERM'] diff --git a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py --- a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py @@ -18,7 +18,7 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from pyrepl.historical_reader import HistoricalReader -from .infrastructure import EA, BaseTestReader, read_spec +from .infrastructure import EA, BaseTestReader, sane_term, read_spec # this test case should contain as-verbatim-as-possible versions of # (applicable) bug reports @@ -46,7 +46,8 @@ read_spec(spec, HistoricalTestReader) - at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") + at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") def test_signal_failure(monkeypatch): import os import pty @@ -61,13 +62,14 @@ mfd, sfd = pty.openpty() try: - c = UnixConsole(sfd, sfd) - c.prepare() - c.restore() - monkeypatch.setattr(signal, 'signal', failing_signal) - c.prepare() - monkeypatch.setattr(signal, 'signal', really_failing_signal) - c.restore() + with sane_term(): + c = UnixConsole(sfd, sfd) + c.prepare() + c.restore() + monkeypatch.setattr(signal, 'signal', failing_signal) + c.prepare() + monkeypatch.setattr(signal, 'signal', really_failing_signal) + c.restore() finally: os.close(mfd) os.close(sfd) diff --git a/pypy/module/test_lib_pypy/pyrepl/test_readline.py 
b/pypy/module/test_lib_pypy/pyrepl/test_readline.py --- a/pypy/module/test_lib_pypy/pyrepl/test_readline.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_readline.py @@ -1,7 +1,10 @@ import pytest +from .infrastructure import sane_term - at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") + + at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") def test_raw_input(): import os import pty @@ -11,7 +14,8 @@ readline_wrapper = _ReadlineWrapper(slave, slave) os.write(master, b'input\n') - result = readline_wrapper.get_reader().readline() + with sane_term(): + result = readline_wrapper.get_reader().readline() #result = readline_wrapper.raw_input('prompt:') assert result == 'input' # A bytes string on python2, a unicode string on python3. diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py --- a/pypy/objspace/std/objectobject.py +++ b/pypy/objspace/std/objectobject.py @@ -110,7 +110,7 @@ def descr__init__(space, w_obj, __args__): # don't allow arguments unless __new__ is overridden w_type = space.type(w_obj) - w_parent_new, _ = w_type.lookup_where('__new__') + w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__') if w_parent_new is space.w_object: try: __args__.fixedunpack(0) diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -288,9 +288,11 @@ RPyListPrinter.recursive = True try: itemlist = [] - for i in range(length): + for i in range(min(length, MAX_DISPLAY_LENGTH)): item = items[i] itemlist.append(str(item)) # may recurse here + if length > MAX_DISPLAY_LENGTH: + itemlist.append("...") str_items = ', '.join(itemlist) finally: RPyListPrinter.recursive = False diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ # hypothesis is used for test generation on untranslated jit tests hypothesis -enum>=0.4.6 # is a dependency, but old pip does 
not pick it up enum34>=1.1.2 diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -126,6 +126,9 @@ ChoiceOption("jit_profiler", "integrate profiler support into the JIT", ["off", "oprofile"], default="off"), + ChoiceOption("jit_opencoder_model", "the model limits the maximal length" + " of traces. Use big if you want to go bigger than " + "the default", ["big", "normal"], default="normal"), BoolOption("check_str_without_nul", "Forbid NUL chars in strings in some external function calls", default=False, cmdline=None), diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -77,6 +77,7 @@ for c in s: buf.append(c) buf.append(' ') +rpython_print_item._annenforceargs_ = (str,) def rpython_print_newline(): buf = stdoutbuffer.linebuf diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -939,9 +939,9 @@ op = operations[i] self.mc.mark_op(op) opnum = op.getopnum() - if op.has_no_side_effect() and op not in regalloc.longevity: + if rop.has_no_side_effect(opnum) and op not in regalloc.longevity: regalloc.possibly_free_vars_for_op(op) - elif not we_are_translated() and op.getopnum() == -127: + elif not we_are_translated() and op.getopnum() == rop.FORCE_SPILL: regalloc.prepare_force_spill(op, fcond) else: arglocs = regalloc_operations[opnum](regalloc, op, fcond) @@ -949,7 +949,7 @@ fcond = asm_operations[opnum](self, op, arglocs, regalloc, fcond) assert fcond is not None - if op.is_guard(): + if rop.is_guard(opnum): regalloc.possibly_free_vars(op.getfailargs()) if op.type != 'v': regalloc.possibly_free_var(op) diff --git a/rpython/jit/backend/arm/detect.py b/rpython/jit/backend/arm/detect.py --- a/rpython/jit/backend/arm/detect.py 
+++ b/rpython/jit/backend/arm/detect.py @@ -63,3 +63,44 @@ "falling back to", "ARMv%d" % n) debug_stop("jit-backend-arch") return n + + +# Once we can rely on the availability of glibc >= 2.16, replace this with: +# from rpython.rtyper.lltypesystem import lltype, rffi +# getauxval = rffi.llexternal("getauxval", [lltype.Unsigned], lltype.Unsigned) +def getauxval(type_, filename='/proc/self/auxv'): + fd = os.open(filename, os.O_RDONLY, 0644) + + buf_size = 2048 + struct_size = 8 # 2x uint32 + try: + buf = os.read(fd, buf_size) + finally: + os.close(fd) + + # decode chunks of 8 bytes (a_type, a_val), and + # return the a_val whose a_type corresponds to type_, + # or zero if not found. + i = 0 + while i <= buf_size - struct_size: + # We only support little-endian ARM + a_type = (ord(buf[i]) | + (ord(buf[i+1]) << 8) | + (ord(buf[i+2]) << 16) | + (ord(buf[i+3]) << 24)) + a_val = (ord(buf[i+4]) | + (ord(buf[i+5]) << 8) | + (ord(buf[i+6]) << 16) | + (ord(buf[i+7]) << 24)) + i += struct_size + if a_type == type_: + return a_val + + return 0 + + +def detect_neon(): + AT_HWCAP = 16 + HWCAP_NEON = 1 << 12 + hwcap = getauxval(AT_HWCAP) + return bool(hwcap & HWCAP_NEON) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1092,8 +1092,8 @@ self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) return fcond - # the following five instructions are only ARMv7; - # regalloc.py won't call them at all on ARMv6 + # the following five instructions are only ARMv7 with NEON; + # regalloc.py won't call them at all, in other cases emit_opx_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') emit_opx_llong_sub = gen_emit_float_op('llong_sub', 'VSUB_i64') emit_opx_llong_and = gen_emit_float_op('llong_and', 'VAND_i64') diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ 
b/rpython/jit/backend/arm/regalloc.py @@ -530,7 +530,7 @@ EffectInfo.OS_LLONG_AND, EffectInfo.OS_LLONG_OR, EffectInfo.OS_LLONG_XOR): - if self.cpu.cpuinfo.arch_version >= 7: + if self.cpu.cpuinfo.neon: args = self._prepare_llong_binop_xx(op, fcond) self.perform_extra(op, args, fcond) return diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -7,13 +7,14 @@ from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.jit.backend.arm.detect import detect_hardfloat -from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.backend.arm.detect import detect_arch_version, detect_neon jitframe.STATICSIZE = JITFRAME_FIXED_SIZE class CPUInfo(object): hf_abi = False arch_version = 6 + neon = False class AbstractARMCPU(AbstractLLCPU): @@ -48,6 +49,7 @@ def setup_once(self): self.cpuinfo.arch_version = detect_arch_version() self.cpuinfo.hf_abi = detect_hardfloat() + self.cpuinfo.neon = detect_neon() #self.codemap.setup() self.assembler.setup_once() diff --git a/rpython/jit/backend/arm/test/test_detect.py b/rpython/jit/backend/arm/test/test_detect.py --- a/rpython/jit/backend/arm/test/test_detect.py +++ b/rpython/jit/backend/arm/test/test_detect.py @@ -1,6 +1,6 @@ import py from rpython.tool.udir import udir -from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.backend.arm.detect import detect_arch_version, getauxval cpuinfo = "Processor : ARMv%d-compatible processor rev 7 (v6l)""" cpuinfo2 = """processor : 0 @@ -29,6 +29,19 @@ address sizes : 36 bits physical, 48 bits virtual power management: """ +# From a Marvell Armada 370/XP +auxv = ( + '\x10\x00\x00\x00\xd7\xa8\x1e\x00\x06\x00\x00\x00\x00\x10\x00\x00\x11\x00' + '\x00\x00d\x00\x00\x00\x03\x00\x00\x004\x00\x01\x00\x04\x00\x00\x00 \x00' + 
'\x00\x00\x05\x00\x00\x00\t\x00\x00\x00\x07\x00\x00\x00\x00\xe0\xf3\xb6' + '\x08\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00t\xcf\x04\x00\x0b\x00\x00' + '\x000\x0c\x00\x00\x0c\x00\x00\x000\x0c\x00\x00\r\x00\x00\x000\x0c\x00\x00' + '\x0e\x00\x00\x000\x0c\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x19\x00\x00' + '\x00\x8a\xf3\x87\xbe\x1a\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\xec' + '\xff\x87\xbe\x0f\x00\x00\x00\x9a\xf3\x87\xbe\x00\x00\x00\x00\x00\x00\x00' + '\x00' +) + def write_cpuinfo(info): filepath = udir.join('get_arch_version') @@ -46,3 +59,10 @@ py.test.raises(ValueError, 'detect_arch_version(write_cpuinfo(cpuinfo % 5))') assert detect_arch_version(write_cpuinfo(cpuinfo2)) == 6 + + +def test_getauxval_no_neon(): + path = udir.join('auxv') + path.write(auxv, 'wb') + AT_HWCAP = 16 + assert getauxval(AT_HWCAP, filename=str(path)) == 2009303 diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -146,7 +146,7 @@ MODEL_X86_64: ['floats', 'singlefloats'], MODEL_X86_64_SSE4: ['floats', 'singlefloats'], MODEL_ARM: ['floats', 'singlefloats', 'longlong'], - MODEL_PPC_64: [], # we don't even have PPC directory, so no + MODEL_PPC_64: ['floats'], MODEL_S390_64: ['floats'], }[backend_name] diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -455,7 +455,7 @@ if box is not frame.current_op: value = frame.env[box] else: - value = box.getvalue() # 0 or 0.0 or NULL + value = 0 # box.getvalue() # 0 or 0.0 or NULL else: value = None values.append(value) @@ -472,6 +472,13 @@ # ------------------------------------------------------------ + def setup_descrs(self): + all_descrs = [] + for k, v in self.descrs.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + return all_descrs + def calldescrof(self, FUNC, ARGS, RESULT, 
effect_info): key = ('call', getkind(RESULT), tuple([getkind(A) for A in ARGS]), diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -331,7 +331,7 @@ counter = self._register_counter(tp, number, token) c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) operations.append( - ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None)) + ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr])) def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -21,6 +21,30 @@ self._cache_call = {} self._cache_interiorfield = {} + def setup_descrs(self): + all_descrs = [] + for k, v in self._cache_size.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_field.iteritems(): + for k1, v1 in v.iteritems(): + v1.descr_index = len(all_descrs) + all_descrs.append(v1) + for k, v in self._cache_array.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_arraylen.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_call.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_interiorfield.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + assert len(all_descrs) < 2**15 + return all_descrs + def init_size_descr(self, STRUCT, sizedescr): pass diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -316,6 +316,9 @@ return ll_frame return execute_token + def setup_descrs(self): + return 
self.gc_ll_descr.setup_descrs() + # ------------------- helpers and descriptions -------------------- @staticmethod diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -683,7 +683,7 @@ for i in range(len(operations)-1, -1, -1): op = operations[i] if op.type != 'v': - if op not in last_used and op.has_no_side_effect(): + if op not in last_used and rop.has_no_side_effect(op.opnum): continue opnum = op.getopnum() for j in range(op.numargs()): @@ -695,7 +695,7 @@ if opnum != rop.JUMP and opnum != rop.LABEL: if arg not in last_real_usage: last_real_usage[arg] = i - if op.is_guard(): + if rop.is_guard(op.opnum): for arg in op.getfailargs(): if arg is None: # hole continue @@ -732,14 +732,7 @@ return longevity, last_real_usage def is_comparison_or_ovf_op(opnum): - from rpython.jit.metainterp.resoperation import opclasses - cls = opclasses[opnum] - # hack hack: in theory they are instance method, but they don't use - # any instance field, we can use a fake object - class Fake(cls): - pass - op = Fake() - return op.is_comparison() or op.is_ovf() + return rop.is_comparison(opnum) or rop.is_ovf(opnum) def valid_addressing_size(size): return size == 1 or size == 2 or size == 4 or size == 8 diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -103,7 +103,7 @@ orig_op.set_forwarded(op) replaced = True op.setarg(i, arg) - if op.is_guard(): + if rop.is_guard(op.opnum): if not replaced: op = op.copy_and_change(op.getopnum()) orig_op.set_forwarded(op) @@ -212,7 +212,7 @@ # self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) # op.setarg(1, ConstInt(scale)) # op.setarg(2, v_length) - if op.is_getarrayitem() or \ + if rop.is_getarrayitem(opnum) or \ opnum in (rop.GETARRAYITEM_RAW_I, 
rop.GETARRAYITEM_RAW_F): self.handle_getarrayitem(op) @@ -324,13 +324,13 @@ if self.transform_to_gc_load(op): continue # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- - if op.is_malloc(): + if rop.is_malloc(op.opnum): self.handle_malloc_operation(op) continue - if (op.is_guard() or + if (rop.is_guard(op.opnum) or self.could_merge_with_next_guard(op, i, operations)): self.emit_pending_zeros() - elif op.can_malloc(): + elif rop.can_malloc(op.opnum): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: self.emitting_an_operation_that_can_collect() @@ -370,8 +370,8 @@ # return True in cases where the operation and the following guard # should likely remain together. Simplified version of # can_merge_with_next_guard() in llsupport/regalloc.py. - if not op.is_comparison(): - return op.is_ovf() # int_xxx_ovf() / guard_no_overflow() + if not rop.is_comparison(op.opnum): + return rop.is_ovf(op.opnum) # int_xxx_ovf() / guard_no_overflow() if i + 1 >= len(operations): return False next_op = operations[i + 1] @@ -400,7 +400,6 @@ # it's hard to test all cases). Rewrite it away. 
value = int(opnum == rop.GUARD_FALSE) op1 = ResOperation(rop.SAME_AS_I, [ConstInt(value)]) - op1.setint(value) self.emit_op(op1) lst = op.getfailargs()[:] lst[i] = op1 @@ -633,8 +632,7 @@ args = [frame, arglist[jd.index_of_virtualizable]] else: args = [frame] - call_asm = ResOperation(op.getopnum(), args, - op.getdescr()) + call_asm = ResOperation(op.getopnum(), args, descr=op.getdescr()) self.replace_op_with(self.get_box_replacement(op), call_asm) self.emit_op(call_asm) @@ -708,7 +706,7 @@ def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() - op = ResOperation(rop.CALL_MALLOC_GC, args, descr) + op = ResOperation(rop.CALL_MALLOC_GC, args, descr=descr) self.replace_op_with(v_result, op) self.emit_op(op) # In general, don't add v_result to write_barrier_applied: diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py @@ -6,6 +6,7 @@ from rpython.rlib import rthread from rpython.translator.translator import TranslationContext from rpython.jit.backend.detect_cpu import getcpuclass +from rpython.rlib.rweaklist import RWeakListMixin class CompiledVmprofTest(CCompiledMixin): CPUClass = getcpuclass() @@ -21,6 +22,7 @@ class MyCode: _vmprof_unique_id = 0 + _vmprof_weak_list = RWeakListMixin() ; _vmprof_weak_list.initialize() def __init__(self, name): self.name = name diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -286,7 +286,8 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.fprm.position = i - if op.has_no_side_effect() and op not in self.longevity: + opnum = op.opnum + if rop.has_no_side_effect(opnum) and op not in self.longevity: i += 1 
self.possibly_free_vars_for_op(op) continue @@ -298,8 +299,7 @@ else: self.fprm.temp_boxes.append(box) # - opnum = op.getopnum() - if not we_are_translated() and opnum == -127: + if not we_are_translated() and opnum == rop.FORCE_SPILL: self._consider_force_spill(op) else: arglocs = oplist[opnum](self, op) diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.rtyper import rclass from rpython.jit.backend.test import test_random +from rpython.jit.backend.test.test_random import getint, getref_base, getref from rpython.jit.metainterp.resoperation import ResOperation, rop, optypes from rpython.jit.metainterp.history import ConstInt, ConstPtr, getkind from rpython.jit.codewriter import heaptracker @@ -169,7 +170,7 @@ if length == 0: raise test_random.CannotProduceOperation v_index = r.choice(self.intvars) - if not (0 <= v_index.getint() < length): + if not (0 <= getint(v_index) < length): v_index = ConstInt(r.random_integer() % length) return v_index @@ -311,7 +312,7 @@ def field_descr(self, builder, r): v, A = builder.get_structptr_var(r, type=lltype.Array, array_of_structs=True) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) choice = [] for name in A.OF._names: @@ -344,7 +345,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, w], descr) @@ -357,7 +358,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -389,7 +390,7 @@ class 
GetArrayItemOperation(ArrayOperation): def field_descr(self, builder, r): v, A = builder.get_arrayptr_var(r) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) descr = self.array_descr(builder, A) return v, A, v_index, descr @@ -411,7 +412,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(A.OF, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -455,7 +456,7 @@ v_ptr = builder.do(self.opnum, [v_length]) getattr(builder, self.builder_cache).append(v_ptr) # Initialize the string. Is there a better way to do this? - for i in range(v_length.getint()): + for i in range(getint(v_length)): v_index = ConstInt(i) v_char = ConstInt(r.random_integer() % self.max) builder.do(self.set_char, [v_ptr, v_index, v_char]) @@ -471,9 +472,9 @@ current = getattr(builder, self.builder_cache) if current and r.random() < .8: v_string = r.choice(current) - string = v_string.getref(self.ptr) + string = getref(self.ptr, v_string) else: - string = self.alloc(builder.get_index(500, r).getint()) + string = self.alloc(getint(builder.get_index(500, r))) v_string = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, string)) current.append(v_string) for i in range(len(string.chars)): @@ -484,7 +485,7 @@ class AbstractGetItemOperation(AbstractStringOperation): def produce_into(self, builder, r): v_string = self.get_string(builder, r) - v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) builder.do(self.opnum, [v_string, v_index]) class AbstractSetItemOperation(AbstractStringOperation): @@ -492,7 +493,7 @@ v_string = self.get_string(builder, r) if isinstance(v_string, ConstPtr): raise test_random.CannotProduceOperation # setitem(Const, ...) 
- v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) v_target = ConstInt(r.random_integer() % self.max) builder.do(self.opnum, [v_string, v_index, v_target]) @@ -505,15 +506,15 @@ def produce_into(self, builder, r): v_srcstring = self.get_string(builder, r) v_dststring = self.get_string(builder, r) - src = v_srcstring.getref(self.ptr) - dst = v_dststring.getref(self.ptr) + src = getref(self.ptr, v_srcstring) + dst = getref(self.ptr, v_dststring) if src == dst: # because it's not a raise test_random.CannotProduceOperation # memmove(), but memcpy() srclen = len(src.chars) dstlen = len(dst.chars) v_length = builder.get_index(min(srclen, dstlen), r) - v_srcstart = builder.get_index(srclen - v_length.getint() + 1, r) - v_dststart = builder.get_index(dstlen - v_length.getint() + 1, r) + v_srcstart = builder.get_index(srclen - getint(v_length) + 1, r) + v_dststart = builder.get_index(dstlen - getint(v_length) + 1, r) builder.do(self.opnum, [v_srcstring, v_dststring, v_srcstart, v_dststart, v_length]) @@ -585,7 +586,7 @@ """ % funcargs).compile() vtableptr = v._hints['vtable']._as_ptr() d = { - 'ptr': S.getref_base(), + 'ptr': getref_base(S), 'vtable' : vtableptr, 'LLException' : LLException, } diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -11,11 +11,9 @@ from rpython.jit.metainterp.executor import _execute_arglist, wrap_constant from rpython.jit.metainterp.resoperation import opname from rpython.jit.codewriter import longlong -from rpython.rtyper.lltypesystem import lltype, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rstr from rpython.rtyper import rclass -class PleaseRewriteMe(Exception): - pass class DummyLoop(object): def __init__(self, subops): @@ -27,6 +25,41 @@ def execute_raised(self, exc, constant=False): 
self._got_exc = exc + +def getint(v): + if isinstance(v, (ConstInt, InputArgInt)): + return v.getint() + else: + return v._example_int + +def getfloatstorage(v): + if isinstance(v, (ConstFloat, InputArgFloat)): + return v.getfloatstorage() + else: + return v._example_float + +def getfloat(v): + return longlong.getrealfloat(getfloatstorage(v)) + +def getref_base(v): + if isinstance(v, (ConstPtr, InputArgRef)): + return v.getref_base() + else: + return v._example_ref + +def getref(PTR, v): + return lltype.cast_opaque_ptr(PTR, getref_base(v)) + +def constbox(v): + if v.type == INT: + return ConstInt(getint(v)) + if v.type == FLOAT: + return ConstFloat(getfloatstorage(v)) + if v.type == REF: + return ConstPtr(getref_base(v)) + assert 0, v.type + + class OperationBuilder(object): def __init__(self, cpu, loop, vars): self.cpu = cpu @@ -57,11 +90,21 @@ def do(self, opnum, argboxes, descr=None): self.fakemetainterp._got_exc = None op = ResOperation(opnum, argboxes, descr) + argboxes = map(constbox, argboxes) result = _execute_arglist(self.cpu, self.fakemetainterp, opnum, argboxes, descr) if result is not None: - c_result = wrap_constant(result) - op.copy_value_from(c_result) + if lltype.typeOf(result) == lltype.Signed: + op._example_int = result + elif isinstance(result, bool): + op._example_int = int(result) + elif lltype.typeOf(result) == longlong.FLOATSTORAGE: + op._example_float = result + elif isinstance(result, float): + op._example_float = longlong.getfloatstorage(result) + else: + assert lltype.typeOf(result) == llmemory.GCREF + op._example_ref = result self.loop.operations.append(op) return op @@ -101,7 +144,7 @@ if v in names: args.append(names[v]) elif isinstance(v, ConstPtr): - assert not v.getref_base() # otherwise should be in the names + assert not getref_base(v) # otherwise should be in the names args.append('ConstPtr(lltype.nullptr(llmemory.GCREF.TO))') elif isinstance(v, ConstFloat): args.append('ConstFloat(longlong.getfloatstorage(%r))' @@ -198,10 
+241,10 @@ # def writevar(v, nameprefix, init=''): if nameprefix == 'const_ptr': - if not v.getref_base(): + if not getref_base(v): return 'lltype.nullptr(llmemory.GCREF.TO)' - TYPE = v.getref_base()._obj.ORIGTYPE - cont = lltype.cast_opaque_ptr(TYPE, v.getref_base()) + TYPE = getref_base(v)._obj.ORIGTYPE + cont = lltype.cast_opaque_ptr(TYPE, getref_base(v)) if TYPE.TO._is_varsize(): if isinstance(TYPE.TO, lltype.GcStruct): lgt = len(cont.chars) @@ -252,9 +295,9 @@ for i, v in enumerate(self.loop.inputargs): assert not isinstance(v, Const) if v.type == FLOAT: - vals.append("longlong.getfloatstorage(%r)" % v.getfloat()) + vals.append("longlong.getfloatstorage(%r)" % getfloat(v)) else: - vals.append("%r" % v.getint()) + vals.append("%r" % getint(v)) print >>s, ' loop_args = [%s]' % ", ".join(vals) print >>s, ' frame = cpu.execute_token(looptoken, *loop_args)' if self.should_fail_by is None: @@ -264,10 +307,10 @@ for i, v in enumerate(fail_args): if v.type == FLOAT: print >>s, (' assert longlong.getrealfloat(' - 'cpu.get_float_value(frame, %d)) == %r' % (i, v.getfloatstorage())) + 'cpu.get_float_value(frame, %d)) == %r' % (i, getfloatstorage(v))) else: print >>s, (' assert cpu.get_int_value(frame, %d) == %d' - % (i, v.getint())) + % (i, getint(v))) self.names = names s.flush() @@ -295,7 +338,7 @@ builder.intvars.append(v_result) boolres = self.boolres if boolres == 'sometimes': - boolres = v_result.getint() in [0, 1] + boolres = getint(v_result) in [0, 1] if boolres: builder.boolvars.append(v_result) elif v_result.type == FLOAT: @@ -346,10 +389,10 @@ v_second = ConstInt((value & self.and_mask) | self.or_mask) else: v = r.choice(builder.intvars) - v_value = v.getint() + v_value = getint(v) if (v_value & self.and_mask) != v_value: v = builder.do(rop.INT_AND, [v, ConstInt(self.and_mask)]) - v_value = v.getint() + v_value = getint(v) if (v_value | self.or_mask) != v_value: v = builder.do(rop.INT_OR, [v, ConstInt(self.or_mask)]) v_second = v @@ -395,9 +438,9 @@ v_second = 
ConstFloat(r.random_float_storage()) else: v_second = r.choice(builder.floatvars) - if abs(v_first.getfloat()) > 1E100 or abs(v_second.getfloat()) > 1E100: + if abs(getfloat(v_first)) > 1E100 or abs(getfloat(v_second)) > 1E100: raise CannotProduceOperation # avoid infinities - if abs(v_second.getfloat()) < 1E-100: + if abs(getfloat(v_second)) < 1E-100: raise CannotProduceOperation # e.g. division by zero error self.put(builder, [v_first, v_second]) @@ -432,7 +475,7 @@ if not builder.floatvars: raise CannotProduceOperation box = r.choice(builder.floatvars) - if not (-sys.maxint-1 <= box.getfloat() <= sys.maxint): + if not (-sys.maxint-1 <= getfloat(box) <= sys.maxint): raise CannotProduceOperation # would give an overflow self.put(builder, [box]) @@ -440,8 +483,8 @@ def gen_guard(self, builder, r): v = builder.get_bool_var(r) op = ResOperation(self.opnum, [v]) - passing = ((self.opnum == rop.GUARD_TRUE and v.getint()) or - (self.opnum == rop.GUARD_FALSE and not v.getint())) + passing = ((self.opnum == rop.GUARD_TRUE and getint(v)) or + (self.opnum == rop.GUARD_FALSE and not getint(v))) return op, passing def produce_into(self, builder, r): @@ -459,8 +502,8 @@ raise CannotProduceOperation box = r.choice(builder.ptrvars)[0] op = ResOperation(self.opnum, [box]) - passing = ((self.opnum == rop.GUARD_NONNULL and box.getref_base()) or - (self.opnum == rop.GUARD_ISNULL and not box.getref_base())) + passing = ((self.opnum == rop.GUARD_NONNULL and getref_base(box)) or + (self.opnum == rop.GUARD_ISNULL and not getref_base(box))) return op, passing class GuardValueOperation(GuardOperation): @@ -470,14 +513,14 @@ other = r.choice(builder.intvars) else: if r.random() < 0.75: - value = v.getint() + value = getint(v) elif r.random() < 0.5: - value = v.getint() ^ 1 + value = getint(v) ^ 1 else: value = r.random_integer() other = ConstInt(value) op = ResOperation(self.opnum, [v, other]) - return op, (v.getint() == other.getint()) + return op, (getint(v) == getint(other)) # 
____________________________________________________________ @@ -675,7 +718,7 @@ assert not hasattr(loop, '_targettoken') for i in range(position): op = loop.operations[i] - if (not op.has_no_side_effect() + if (not rop.has_no_side_effect(op.opnum) or op.type not in (INT, FLOAT)): position = i break # cannot move the LABEL later @@ -728,9 +771,9 @@ self.expected = {} for v in endvars: if v.type == INT: - self.expected[v] = v.getint() + self.expected[v] = getint(v) elif v.type == FLOAT: - self.expected[v] = v.getfloatstorage() + self.expected[v] = getfloatstorage(v) else: assert 0, v.type @@ -742,7 +785,7 @@ args = [] for box in self.startvars: if box not in self.loop.inputargs: - box = box.constbox() + box = constbox(box) args.append(box) self.cpu.compile_loop(self.loop.inputargs, [ResOperation(rop.JUMP, args, @@ -760,7 +803,7 @@ def clear_state(self): for v, S, fields in self.prebuilt_ptr_consts: - container = v.getref_base()._obj.container + container = getref_base(v)._obj.container for name, value in fields.items(): if isinstance(name, str): setattr(container, name, value) @@ -781,9 +824,9 @@ arguments = [] for box in self.loop.inputargs: if box.type == INT: - arguments.append(box.getint()) + arguments.append(getint(box)) elif box.type == FLOAT: - arguments.append(box.getfloatstorage()) + arguments.append(getfloatstorage(box)) else: assert 0, box.type deadframe = cpu.execute_token(self.runjitcelltoken(), *arguments) @@ -795,7 +838,7 @@ if v not in self.expected: assert v.getopnum() == rop.SAME_AS_I # special case assert isinstance(v.getarg(0), ConstInt) - self.expected[v] = v.getarg(0).getint() + self.expected[v] = getint(v.getarg(0)) if v.type == FLOAT: value = cpu.get_float_value(deadframe, i) else: @@ -807,7 +850,7 @@ ) exc = cpu.grab_exc_value(deadframe) if (self.guard_op is not None and - self.guard_op.is_guard_exception()): + rop.is_guard_exception(self.guard_op.getopnum())): if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION: do_assert(exc, 
"grab_exc_value() should not be %r" % (exc,)) @@ -840,7 +883,7 @@ # generate the branch: a sequence of operations that ends in a FINISH From pypy.commits at gmail.com Wed Apr 6 14:41:07 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 06 Apr 2016 11:41:07 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Allow specifying path as a file descriptor in statvfs() Message-ID: <57055843.53371c0a.b7c23.ffffac0c@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83548:afa5e4d31986 Date: 2016-04-06 19:40 +0100 http://bitbucket.org/pypy/pypy/changeset/afa5e4d31986/ Log: Allow specifying path as a file descriptor in statvfs() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -432,7 +432,9 @@ On some platforms, path may also be specified as an open file descriptor. If this functionality is unavailable, using it raises an exception.""" try: - st = dispatch_filename(rposix_stat.statvfs)(space, w_path) + st = dispatch_filename( + rposix_stat.statvfs, + allow_fd_fn=rposix_stat.fstatvfs)(space, w_path) except OSError as e: raise wrap_oserror2(space, e, w_path) else: From pypy.commits at gmail.com Wed Apr 6 15:01:34 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 06 Apr 2016 12:01:34 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: Reuse rposix definition of TIMESPEC in rposix_stat Message-ID: <57055d0e.89cbc20a.418ed.ffffa304@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83549:f79b252981fb Date: 2016-04-06 20:00 +0100 http://bitbucket.org/pypy/pypy/changeset/f79b252981fb/ Log: Reuse rposix definition of TIMESPEC in rposix_stat diff --git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py --- a/rpython/rlib/rposix_stat.py +++ b/rpython/rlib/rposix_stat.py @@ -36,13 +36,12 @@ # sub-second timestamps. # - TIMESPEC is defined when the "struct stat" contains st_atim field. 
-if sys.platform.startswith('linux') or sys.platform.startswith('openbsd'): - TIMESPEC = platform.Struct('struct timespec', - [('tv_sec', rffi.TIME_T), - ('tv_nsec', rffi.LONG)]) -else: +try: + from rpython.rlib.rposix import TIMESPEC +except ImportError: TIMESPEC = None + # all possible fields - some of them are not available on all platforms ALL_STAT_FIELDS = [ ("st_mode", lltype.Signed), @@ -300,13 +299,6 @@ includes=INCLUDES ) -if TIMESPEC is not None: - class CConfig_for_timespec: - _compilation_info_ = compilation_info - TIMESPEC = TIMESPEC - TIMESPEC = lltype.Ptr( - platform.configure(CConfig_for_timespec)['TIMESPEC']) - def posix_declaration(try_to_add=None): global STAT_STRUCT, STATVFS_STRUCT @@ -322,7 +314,7 @@ if _name == originalname: # replace the 'st_atime' field of type rffi.DOUBLE # with a field 'st_atim' of type 'struct timespec' - lst[i] = (timespecname, TIMESPEC.TO) + lst[i] = (timespecname, TIMESPEC) break _expand(LL_STAT_FIELDS, 'st_atime', 'st_atim') From pypy.commits at gmail.com Wed Apr 6 15:32:36 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 06 Apr 2016 12:32:36 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: Add wrapper for fstatat() Message-ID: <57056454.657bc20a.12cb2.ffffad06@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83550:464d641a9035 Date: 2016-04-06 20:32 +0100 http://bitbucket.org/pypy/pypy/changeset/464d641a9035/ Log: Add wrapper for fstatat() diff --git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py --- a/rpython/rlib/rposix_stat.py +++ b/rpython/rlib/rposix_stat.py @@ -23,6 +23,7 @@ from rpython.rlib.rarithmetic import intmask from rpython.rlib.rposix import ( replace_os_function, handle_posix_error, _as_bytes0) +from rpython.rlib import rposix _WIN32 = sys.platform.startswith('win') _LINUX = sys.platform.startswith('linux') @@ -504,6 +505,23 @@ path = traits.as_str0(path) return win32_xstat(traits, path, traverse=False) +if rposix.HAVE_FSTATAT: + from rpython.rlib.rposix 
import AT_FDCWD, AT_SYMLINK_NOFOLLOW + c_fstatat = rffi.llexternal('fstatat', + [rffi.INT, rffi.CCHARP, STAT_STRUCT, rffi.INT], rffi.INT, + compilation_info=compilation_info, + save_err=rffi.RFFI_SAVE_ERRNO, macro=True) + + def fstatat(pathname, dir_fd=AT_FDCWD, follow_symlinks=True): + if follow_symlinks: + flags = 0 + else: + flags = AT_SYMLINK_NOFOLLOW + with lltype.scoped_alloc(STAT_STRUCT.TO) as stresult: + error = c_fstatat(dir_fd, pathname, stresult, flags) + handle_posix_error('fstatat', error) + return build_stat_result(stresult) + @replace_os_function('fstatvfs') def fstatvfs(fd): with lltype.scoped_alloc(STATVFS_STRUCT.TO) as stresult: diff --git a/rpython/rlib/test/test_rposix_stat.py b/rpython/rlib/test/test_rposix_stat.py --- a/rpython/rlib/test/test_rposix_stat.py +++ b/rpython/rlib/test/test_rposix_stat.py @@ -56,3 +56,13 @@ except OSError, e: py.test.skip("the underlying os.fstatvfs() failed: %s" % e) rposix_stat.fstatvfs(0) + + at py.test.mark.skipif("not hasattr(rposix_stat, 'fstatat')") +def test_fstatat(tmpdir): + tmpdir.join('file').write('text') + dirfd = os.open(str(tmpdir), os.O_RDONLY) + try: + result = rposix_stat.fstatat('file', dir_fd=dirfd, follow_symlinks=False) + finally: + os.close(dirfd) + assert result.st_atime == tmpdir.join('file').atime() From pypy.commits at gmail.com Wed Apr 6 15:43:21 2016 From: pypy.commits at gmail.com (antocuni) Date: Wed, 06 Apr 2016 12:43:21 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: move the logic from __del__ to destructor, because we are not allowed to call arbitrary code from the interp-level __del__ Message-ID: <570566d9.26b0c20a.f195b.ffffb460@mx.google.com> Author: Antonio Cuni Branch: resource_warning Changeset: r83551:110eae852734 Date: 2016-04-06 19:39 +0000 http://bitbucket.org/pypy/pypy/changeset/110eae852734/ Log: move the logic from __del__ to destructor, because we are not allowed to call arbitrary code from the interp-level __del__ diff --git 
a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -49,15 +49,16 @@ # thread that runs __del__, so no race condition should be possible self.clear_all_weakrefs() if self.stream is not None: - if self.space.sys.resource_warning_enabled: - w_repr = self.space.repr(self) - str_repr = self.space.str_w(w_repr) - self.space.resource_warning("WARNING: unclosed file: " + str_repr) self.enqueue_for_destruction(self.space, W_File.destructor, 'close() method of ') def destructor(self): assert isinstance(self, W_File) + if self.space.sys.resource_warning_enabled: + w_repr = self.space.repr(self) + str_repr = self.space.str_w(w_repr) + self.space.resource_warning("WARNING: unclosed file: " + str_repr) + # try: self.direct_close() except StreamErrors, e: From pypy.commits at gmail.com Thu Apr 7 02:34:25 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 06 Apr 2016 23:34:25 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: rvmprof.enable_jitlog now correctly writes the header, teardown must still be called correctly Message-ID: <5705ff71.d7b81c0a.6028b.343f@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83552:acf792f13092 Date: 2016-04-07 08:33 +0200 http://bitbucket.org/pypy/pypy/changeset/acf792f13092/ Log: rvmprof.enable_jitlog now correctly writes the header, teardown must still be called correctly diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -534,7 +534,7 @@ looptoken._ll_function_addr = rawstart if logger: log = logger.log_trace(MARK_TRACE_ASM, None, self.mc) - log.write(inputargs, operations, None, ops_offset=ops_offset, unique_id=unique_id) + log.write(inputargs, operations, None, ops_offset=ops_offset, unique_id=rawstart) self.fixup_target_tokens(rawstart) self.teardown() # oprofile support @@ -590,7 
+590,7 @@ frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) if logger: log = logger.log_trace(MARK_TRACE_ASM, None, self.mc) - log.write(inputargs, operations, faildescr, ops_offset) + log.write(inputargs, operations, faildescr, ops_offset, unique_id=rawstart) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) self.teardown() diff --git a/rpython/jit/backend/x86/test/test_jitlog.py b/rpython/jit/backend/x86/test/test_jitlog.py --- a/rpython/jit/backend/x86/test/test_jitlog.py +++ b/rpython/jit/backend/x86/test/test_jitlog.py @@ -27,9 +27,9 @@ assert len(fd.read()) > 0 print(name) - def test_venv(self): + def test_env(self, monkeypatch): fileno, name = tempfile.mkstemp() - os.environ["JITLOG"] = name + monkeypatch.setenv("JITLOG", name) self.run_sample_loop(None) assert os.path.exists(name) with open(name, 'rb') as fd: @@ -55,4 +55,3 @@ y -= 1 return res res = self.meta_interp(f, [6, 20]) - self.check_trace_count(2) diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -68,28 +68,31 @@ def __init__(self): self.cintf = cintf.setup() self.memo = {} - self.is_setup = False def setup_once(self): - if self.is_setup: + if self.cintf.jitlog_enabled(): return - self.is_setup = True self.cintf.jitlog_try_init_using_env() if not self.cintf.jitlog_enabled(): return + VMProfJitLogger._write_header(self.cintf) + @staticmethod + @always_inline + def _write_header(cintf): header = encode_le_16bit(0xaffe) - self._write_marked(MARK_JITLOG_HEADER, header) + cintf.jitlog_write_marked(MARK_JITLOG_HEADER, + header, len(header)) count = len(resoperations.opname) mark = MARK_RESOP_META for opnum, opname in resoperations.opname.items(): line = encode_le_16bit(opnum) + encode_str(opname.lower()) - self._write_marked(mark, line) + cintf.jitlog_write_marked(mark, line, len(line)) def teardown(self): + import pdb; pdb.set_trace() self.cintf.jitlog_teardown() - 
self.is_setup = False def _write_marked(self, mark, line): if not we_are_translated(): @@ -156,8 +159,9 @@ encode_str(name or '') log._write_marked(self.tag, string) else: - unique_id = compute_unique_id(faildescr) + descr_number = compute_unique_id(faildescr) string = encode_str('bridge') + \ + encode_le_addr(descr_number) + \ encode_le_addr(unique_id) + \ encode_str(name or '') log._write_marked(self.tag, string) diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -120,6 +120,8 @@ p_error = self.cintf.jitlog_init(fileno) if p_error: raise VMProfError(rffi.charp2str(p_error)) + from rpython.jit.metainterp.jitlog import VMProfJitLogger + VMProfJitLogger._write_header(self.cintf) def disable(self): """Disable vmprof. diff --git a/rpython/rlib/rvmprof/src/jitlog_main.h b/rpython/rlib/rvmprof/src/jitlog_main.h --- a/rpython/rlib/rvmprof/src/jitlog_main.h +++ b/rpython/rlib/rvmprof/src/jitlog_main.h @@ -22,7 +22,7 @@ if (filename && filename[0]) { // mode is 775 mode_t mode = S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH; - jitlog_fd = open(filename, O_WRONLY | O_CREAT, mode); + jitlog_fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, mode); if (jitlog_fd == -1) { dprintf(2, "could not open '%s': ", filename); perror(NULL); From pypy.commits at gmail.com Thu Apr 7 02:41:49 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 06 Apr 2016 23:41:49 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: finish is now called in the testing case Message-ID: <5706012d.e7bec20a.ac70.4a5b@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83553:dacac0d48a10 Date: 2016-04-07 08:41 +0200 http://bitbucket.org/pypy/pypy/changeset/dacac0d48a10/ Log: finish is now called in the testing case diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -90,8 +90,7 
@@ line = encode_le_16bit(opnum) + encode_str(opname.lower()) cintf.jitlog_write_marked(mark, line, len(line)) - def teardown(self): - import pdb; pdb.set_trace() + def finish(self): self.cintf.jitlog_teardown() def _write_marked(self, mark, line): diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -119,6 +119,7 @@ return interp, graph res = interp.eval_graph(graph, args) if not kwds.get('translate_support_code', False): + warmrunnerdesc.metainterp_sd.jitlog.finish() warmrunnerdesc.metainterp_sd.profiler.finish() warmrunnerdesc.metainterp_sd.cpu.finish_once() print '~~~ return value:', repr(res) From pypy.commits at gmail.com Thu Apr 7 03:30:45 2016 From: pypy.commits at gmail.com (Raemi) Date: Thu, 07 Apr 2016 00:30:45 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: import stmgc Message-ID: <57060ca5.d7b81c0a.6028b.4929@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r83554:b19666f40456 Date: 2016-04-06 15:21 +0300 http://bitbucket.org/pypy/pypy/changeset/b19666f40456/ Log: import stmgc diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -b7f8a106095f +ff3079618aaf diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -1033,6 +1033,13 @@ check_nursery_at_transaction_start(); + if (tl->mem_reset_on_abort) { + assert(!!tl->mem_stored_for_reset_on_abort); + memcpy(tl->mem_stored_for_reset_on_abort, tl->mem_reset_on_abort, + tl->mem_bytes_to_reset_on_abort); + } + + /* Change read-version here, because if we do stm_validate in the safe-point below, we should not see our old reads from the last transaction. 
*/ @@ -1432,6 +1439,9 @@ if (tl->mem_clear_on_abort) memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); + if (tl->mem_reset_on_abort) + memcpy(tl->mem_reset_on_abort, tl->mem_stored_for_reset_on_abort, + tl->mem_bytes_to_reset_on_abort); invoke_and_clear_user_callbacks(1); /* for abort */ diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -76,6 +76,11 @@ the following raw region of memory is cleared. */ char *mem_clear_on_abort; size_t mem_bytes_to_clear_on_abort; + /* mechanism to reset a memory location to the value it had at the start + of the transaction in case of an abort */ + char *mem_reset_on_abort; /* addr */ + size_t mem_bytes_to_reset_on_abort; /* how many bytes */ + char *mem_stored_for_reset_on_abort; /* content at tx start */ /* the next fields are handled internally by the library */ int last_associated_segment_num; /* always a valid seg num */ int thread_local_counter; From pypy.commits at gmail.com Thu Apr 7 03:30:47 2016 From: pypy.commits at gmail.com (Raemi) Date: Thu, 07 Apr 2016 00:30:47 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: tiny fix Message-ID: <57060ca7.c3941c0a.9d6e9.ffff8134@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r83555:5f2e0bc7fa51 Date: 2016-04-07 10:27 +0300 http://bitbucket.org/pypy/pypy/changeset/5f2e0bc7fa51/ Log: tiny fix diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -5,6 +5,7 @@ from rpython.translator.unsimplify import varoftype from rpython.translator.backendopt.dataflow import AbstractForwardDataFlowAnalysis from rpython.translator.backendopt.support import var_needsgc +from rpython.translator.simplify import join_blocks ALWAYS_ALLOW_OPERATIONS = set([ 'force_cast', 'keepalive', 'cast_ptr_to_adr', @@ -268,6 +269,9 
@@ varoftype(lltype.Void)) def insert_turn_inevitable(stmtransformer, graph): + # needed for cases where stm_ignored_stop is in its own block: + join_blocks(graph) + ia = InevitableAnalysis(stmtransformer.break_analyzer) ia.calculate(graph) # From pypy.commits at gmail.com Thu Apr 7 03:30:49 2016 From: pypy.commits at gmail.com (Raemi) Date: Thu, 07 Apr 2016 00:30:49 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: missing renaming to stm_spinlock Message-ID: <57060ca9.49f9c20a.492ef.5a1d@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r83556:ddb53f437889 Date: 2016-04-07 10:27 +0300 http://bitbucket.org/pypy/pypy/changeset/ddb53f437889/ Log: missing renaming to stm_spinlock diff --git a/rpython/translator/c/src/mem.c b/rpython/translator/c/src/mem.c --- a/rpython/translator/c/src/mem.c +++ b/rpython/translator/c/src/mem.c @@ -64,8 +64,8 @@ // spinlock_acquire/spinlock_release defined in ../../stm/src_stm/stmgcintf.h static uint8_t pypy_debug_alloc_lock = 0; #else -# define spinlock_acquire(lock) /* nothing */ -# define spinlock_release(lock) /* nothing */ +# define stm_spinlock_acquire(lock) /* nothing */ +# define stm_spinlock_release(lock) /* nothing */ #endif RPY_EXTERN @@ -75,10 +75,10 @@ RPyAssert(p, "out of memory"); p->addr = addr; p->funcname = funcname; - spinlock_acquire(pypy_debug_alloc_lock); + stm_spinlock_acquire(pypy_debug_alloc_lock); p->next = pypy_debug_alloc_list; pypy_debug_alloc_list = p; - spinlock_release(pypy_debug_alloc_lock); + stm_spinlock_release(pypy_debug_alloc_lock); } RPY_EXTERN @@ -87,18 +87,18 @@ struct pypy_debug_alloc_s **p; if (!addr) return 1; - spinlock_acquire(pypy_debug_alloc_lock); + stm_spinlock_acquire(pypy_debug_alloc_lock); for (p = &pypy_debug_alloc_list; *p; p = &((*p)->next)) if ((*p)->addr == addr) { struct pypy_debug_alloc_s *dying; dying = *p; *p = dying->next; - spinlock_release(pypy_debug_alloc_lock); + stm_spinlock_release(pypy_debug_alloc_lock); free(dying); return 1; } - 
spinlock_release(pypy_debug_alloc_lock); + stm_spinlock_release(pypy_debug_alloc_lock); return 0; } @@ -114,7 +114,7 @@ { long count = 0; struct pypy_debug_alloc_s *p; - spinlock_acquire(pypy_debug_alloc_lock); + stm_spinlock_acquire(pypy_debug_alloc_lock); for (p = pypy_debug_alloc_list; p; p = p->next) count++; if (count > 0) @@ -130,7 +130,7 @@ else fprintf(stderr, " (use PYPY_ALLOC=1 to see the list)\n"); } - spinlock_release(pypy_debug_alloc_lock); + stm_spinlock_release(pypy_debug_alloc_lock); } #endif /* RPY_ASSERT */ From pypy.commits at gmail.com Thu Apr 7 03:30:51 2016 From: pypy.commits at gmail.com (Raemi) Date: Thu, 07 Apr 2016 00:30:51 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: attempt to fix JIT support for vmprof for STM by resetting the thread-local Message-ID: <57060cab.8673c20a.c9221.5f11@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r83557:39060b21f026 Date: 2016-04-07 10:29 +0300 http://bitbucket.org/pypy/pypy/changeset/39060b21f026/ Log: attempt to fix JIT support for vmprof for STM by resetting the thread-local variable that points to the frame on abort. 
diff --git a/rpython/jit/backend/x86/test/test_zrpy_vmprof.py b/rpython/jit/backend/x86/test/test_zrpy_vmprof.py --- a/rpython/jit/backend/x86/test/test_zrpy_vmprof.py +++ b/rpython/jit/backend/x86/test/test_zrpy_vmprof.py @@ -1,7 +1,22 @@ from rpython.jit.backend.llsupport.test.zrpy_vmprof_test import CompiledVmprofTest +from rpython.translator.translator import TranslationContext + +class TestZVMprofSTM(CompiledVmprofTest): + gcrootfinder = "stm" + gc = "stmgc" + thread = True + stm = True + + def _get_TranslationContext(self): + t = TranslationContext() + t.config.translation.thread = True + t.config.translation.stm = True + t.config.translation.gc = "stmgc" + t.config.translation.list_comprehension_operations = True + return t class TestZVMprof(CompiledVmprofTest): gcrootfinder = "shadowstack" - gc = "incminimark" \ No newline at end of file + gc = "incminimark" diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h --- a/rpython/translator/stm/src_stm/extracode.h +++ b/rpython/translator/stm/src_stm/extracode.h @@ -1,4 +1,9 @@ #include +#include + +#ifdef RPYTHON_VMPROF +#include "src/threadlocal.h" +#endif static void _stm_call_finalizer(object_t *obj) @@ -42,12 +47,23 @@ stm_register_thread_local(&stm_thread_local); stm_thread_local.mem_clear_on_abort = (char *)&pypy_g_ExcData; stm_thread_local.mem_bytes_to_clear_on_abort = sizeof(pypy_g_ExcData); + +#ifdef RPYTHON_VMPROF + stm_thread_local.mem_reset_on_abort = (char *)&pypy_threadlocal.vmprof_tl_stack; + stm_thread_local.mem_bytes_to_reset_on_abort = sizeof(pypy_threadlocal.vmprof_tl_stack); + stm_thread_local.mem_stored_for_reset_on_abort = malloc(sizeof(pypy_threadlocal.vmprof_tl_stack)); +#else + stm_thread_local.mem_reset_on_abort = NULL; +#endif } void pypy_stm_unregister_thread_local(void) { stm_unregister_thread_local(&stm_thread_local); stm_thread_local.shadowstack_base = NULL; +#ifdef RPYTHON_VMPROF + 
free(stm_thread_local.mem_stored_for_reset_on_abort); +#endif } From pypy.commits at gmail.com Thu Apr 7 04:27:38 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 07 Apr 2016 01:27:38 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: start working on tooling stability doc Message-ID: <570619fa.06d8c20a.38efb.7186@mx.google.com> Author: fijal Branch: extradoc Changeset: r5632:ce02b4dcb219 Date: 2016-04-07 10:59 +0300 http://bitbucket.org/pypy/extradoc/changeset/ce02b4dcb219/ Log: start working on tooling stability doc diff --git a/planning/tooling-stability.rst b/planning/tooling-stability.rst new file mode 100644 --- /dev/null +++ b/planning/tooling-stability.rst @@ -0,0 +1,11 @@ +Tooling stability +----------------- + +1) From PyPy 5, vmprof.com & the viewing of trace/assembler works with any pypy + +2) We need at least crude headless tests for vmprof.com + - performance of cpython 2.7, 3.5, pypy 5 and newer + - jitviewer + +3) local server for vmprof that loads the browser, without user management + From pypy.commits at gmail.com Thu Apr 7 04:27:40 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 07 Apr 2016 01:27:40 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <570619fc.657bc20a.12cb2.74e7@mx.google.com> Author: fijal Branch: extradoc Changeset: r5633:435c6aa7beb4 Date: 2016-04-07 11:27 +0300 http://bitbucket.org/pypy/extradoc/changeset/435c6aa7beb4/ Log: merge diff --git a/blog/draft/jit-leaner-frontend.rst b/blog/draft/jit-leaner-frontend.rst --- a/blog/draft/jit-leaner-frontend.rst +++ b/blog/draft/jit-leaner-frontend.rst @@ -46,6 +46,7 @@ pointers to store the "operations" object after tracing, we use a compact list of 16-bit integers (with 16bit pointers in between). On 64bit machine the wins are tremendous - it's 4x more efficient to use 16bit pointers than full 64bit pointers. +.. 
XXX: I assume you are talking about "memory efficiency": we should be clearer Additionally those pointers have a much better defined lifespan, so we don't need to bother tracking them by the GC, which also saves quite a bit of time. diff --git a/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt b/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt --- a/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt +++ b/talk/bucharest2016/jit-backend-8vhY1ArTsh.txt @@ -1,4 +1,6 @@ -pypy's assembler backend +======================== +PyPy's assembler backend +======================== input: linear sequence of instructions, called a "trace". @@ -76,9 +78,9 @@ ## GC pointers -Around most CALL instructions, we need to record a description of where the GC pointers are (registers and stack frame). This is needed in case the CALL invokes a garbage collection. The GC pointers can move; the positions in the registers and stack frame are fixed by the GC. That's a reason for why we don't have explicit interior pointers. +Around most CALL instructions, we need to record a description of where the GC pointers are (registers and stack frame). This is needed in case the CALL invokes a garbage collection. The GC pointers can move; the pointers in the registers and stack frame are updated by the GC. That's a reason for why we don't have explicit interior pointers. -GC pointers can appear as constants in the trace. We are busy changing that to use a constant table and MOV REG, (%RIP+offset). The "constant" table can actually change if the GC objects move. +GC pointers can appear as constants in the trace. We are busy changing that to use a constant table and MOV REG, (%RIP+offset). The "constant" in the table is actually updated by the GC if the object move. 
## Vectorization diff --git a/talk/bucharest2016/jit-frontend/Makefile b/talk/bucharest2016/jit-frontend/Makefile --- a/talk/bucharest2016/jit-frontend/Makefile +++ b/talk/bucharest2016/jit-frontend/Makefile @@ -3,7 +3,7 @@ # http://bitbucket.org/antocuni/env/src/619f486c4fad/bin/inkscapeslide.py -talk.pdf: talk.rst author.latex stylesheet.latex diagrams/tracing-phases-p0.pdf diagrams/architecture-p0.pdf diagrams/pypytrace-p0.pdf +talk.pdf: talk.rst author.latex stylesheet.latex diagrams/tracing-phases-p0.pdf diagrams/architecture-p0.pdf diagrams/pypytrace-p0.pdf diagrams/tracetree-p0.pdf rst2beamer.py --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit @@ -22,8 +22,8 @@ # diagrams/trace-p0.pdf: diagrams/trace.svg # cd diagrams && inkscapeslide.py trace.svg -# diagrams/tracetree-p0.pdf: diagrams/tracetree.svg -# cd diagrams && inkscapeslide.py tracetree.svg +diagrams/tracetree-p0.pdf: diagrams/tracetree.svg + cd diagrams && inkscapeslide.py tracetree.svg diagrams/architecture-p0.pdf: diagrams/architecture.svg cd diagrams && inkscapeslide.py architecture.svg diff --git a/talk/bucharest2016/jit-frontend/diagrams/tracetree.svg b/talk/bucharest2016/jit-frontend/diagrams/tracetree.svg new file mode 100644 --- /dev/null +++ b/talk/bucharest2016/jit-frontend/diagrams/tracetree.svg @@ -0,0 +1,488 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + trace+looptrace, guard_sign+blackhole+interp+call_jittedtrace+guard_sign+bridge+loop2+loop + + + label(start, i0, a0)v0 = int_lt(i0, 2000)guard_true(v0)v1 = int_mod(i0, 2)v2 = int_eq(v1, 0)guard_true(v1)a1 = int_add(a0, 10)i1 = int_add(i0, 1)jump(start, i1, a1) + + + + + + + + a1 = int_mul(a0, 2)i1 = int_add(i0, 1)jump(start, i1, a1) + + HOT FAIL + + + + + diff --git a/talk/bucharest2016/jit-frontend/talk.pdf 
b/talk/bucharest2016/jit-frontend/talk.pdf index 4ad8022637420b08f8a93bc24ddfd00d4b13adb2..c74ff0a9eb5d7e988a9424ae7c94708182ad5a1b GIT binary patch [cut] diff --git a/talk/bucharest2016/jit-frontend/talk.rst b/talk/bucharest2016/jit-frontend/talk.rst --- a/talk/bucharest2016/jit-frontend/talk.rst +++ b/talk/bucharest2016/jit-frontend/talk.rst @@ -152,10 +152,38 @@ :scale: 100% -Trace trees ------------ +Trace trees (1) +--------------- -WRITE ME +|scriptsize| +|example<| |small| tracetree.py |end_small| |>| + +.. sourcecode:: python + + def foo(): + a = 0 + i = 0 + N = 100 + while i < N: + if i%2 == 0: + a += 1 + else: + a *= 2; + i += 1 + return a + +|end_example| +|end_scriptsize| + + +Trace trees (2) +--------------- + +.. animage:: diagrams/tracetree-p*.pdf + :align: center + :scale: 30% + + Part 3 ------ From pypy.commits at gmail.com Thu Apr 7 04:29:40 2016 From: pypy.commits at gmail.com (florinpapa) Date: Thu, 07 Apr 2016 01:29:40 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: (florin, antocuni) IN-PROGRESS: Attach and show the creation traceback. Test fails and we don't know why Message-ID: <57061a74.6507c20a.8a93d.7877@mx.google.com> Author: florinpapa Branch: resource_warning Changeset: r83558:980ad9d11a2a Date: 2016-04-07 11:28 +0300 http://bitbucket.org/pypy/pypy/changeset/980ad9d11a2a/ Log: (florin, antocuni) IN-PROGRESS: Attach and show the creation traceback. 
Test fails and we don't know why diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1740,14 +1740,24 @@ _warnings.warn(msg, warningcls, stacklevel=stacklevel) """) - def resource_warning(self, msg): - w_msg = self.wrap(msg) - self.appexec([w_msg], - """(msg): + def resource_warning(self, w_msg, w_tb): + self.appexec([w_msg, w_tb], + """(msg, tb): import sys print >> sys.stderr, msg + if tb: + print >> sys.stderr, "Created at (most recent call last):" + print >> sys.stderr, tb """) + def format_traceback(self): + self.appexec([], + """(): + import traceback + return "".join(traceback.format_stack()) + """) + + class AppExecCache(SpaceCache): def build(cache, source): """ NOT_RPYTHON """ diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -38,11 +38,14 @@ errors = None fd = -1 cffi_fileobj = None # pypy/module/_cffi_backend + w_tb = None # String representation of the traceback at creation time newlines = 0 # Updated when the stream is closed def __init__(self, space): self.space = space + if self.space.sys.resource_warning_enabled: + self.w_tb = self.space.format_traceback() def __del__(self): # assume that the file and stream objects are only visible in the @@ -57,7 +60,8 @@ if self.space.sys.resource_warning_enabled: w_repr = self.space.repr(self) str_repr = self.space.str_w(w_repr) - self.space.resource_warning("WARNING: unclosed file: " + str_repr) + w_msg = self.space.wrap("WARNING: unclosed file:" + str_repr) + self.space.resource_warning(w_msg, self.w_tb) # try: self.direct_close() diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -275,7 +275,14 @@ assert fn() == "" sys.pypy_set_resource_warning(True) msg = fn() + + #f1 
= open("/tmp/test", "w+") + #import pdb; pdb.set_trace() + print msg + #close(f1) + assert msg.startswith("WARNING: unclosed file: Author: Antonio Cuni Branch: resource_warning Changeset: r83559:67909a688ba0 Date: 2016-04-07 10:37 +0200 http://bitbucket.org/pypy/pypy/changeset/67909a688ba0/ Log: (antocuni, florinpapa): fix the test, it was because we returned None from space.format_traceback and the following appexec in space.resource_warning got confused diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1751,7 +1751,7 @@ """) def format_traceback(self): - self.appexec([], + return self.appexec([], """(): import traceback return "".join(traceback.format_stack()) diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -60,7 +60,7 @@ if self.space.sys.resource_warning_enabled: w_repr = self.space.repr(self) str_repr = self.space.str_w(w_repr) - w_msg = self.space.wrap("WARNING: unclosed file:" + str_repr) + w_msg = self.space.wrap("WARNING: unclosed file: " + str_repr) self.space.resource_warning(w_msg, self.w_tb) # try: diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -275,12 +275,6 @@ assert fn() == "" sys.pypy_set_resource_warning(True) msg = fn() - - #f1 = open("/tmp/test", "w+") - #import pdb; pdb.set_trace() - print msg - #close(f1) - assert msg.startswith("WARNING: unclosed file: Author: Antonio Cuni Branch: resource_warning Changeset: r83560:b20b7e966a24 Date: 2016-04-07 11:04 +0200 http://bitbucket.org/pypy/pypy/changeset/b20b7e966a24/ Log: (antocuni, florinpapa): make the test better by checking the output with a regexp diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- 
a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -1,5 +1,6 @@ from __future__ import with_statement import py, os, errno +from pypy.interpreter.gateway import interp2app, unwrap_spec def getfile(space): return space.appexec([], """(): @@ -17,6 +18,17 @@ cls.w_temppath = cls.space.wrap( str(py.test.ensuretemp("fileimpl").join("foo.txt"))) cls.w_file = getfile(cls.space) + # + # the following function is used e.g. in test_resource_warning + @unwrap_spec(regex=str, s=str) + def regex_search(space, regex, s): + import re + import textwrap + regex = textwrap.dedent(regex).strip() + m = re.search(regex, s) + m = bool(m) + return space.wrap(m) + cls.w_regex_search = cls.space.wrap(interp2app(regex_search)) def test_simple(self): f = self.file(self.temppath, "w") @@ -256,6 +268,7 @@ def test_resource_warning(self): import os, gc, sys, cStringIO + import re if '__pypy__' not in sys.builtin_module_names: skip("pypy specific test") def fn(): @@ -275,8 +288,13 @@ assert fn() == "" sys.pypy_set_resource_warning(True) msg = fn() - assert msg.startswith("WARNING: unclosed file: + Created at \(most recent call last\): + File ".*", line .*, in test_resource_warning + File ".*", line .*, in fn + File ".*", line .*, in anonymous + """, msg) finally: sys.pypy_set_resource_warning(False) From pypy.commits at gmail.com Thu Apr 7 05:11:32 2016 From: pypy.commits at gmail.com (antocuni) Date: Thu, 07 Apr 2016 02:11:32 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: (antocuni, florinpapa): make the test even better Message-ID: <57062444.4412c30a.ba3b8.ffff8b9a@mx.google.com> Author: Antonio Cuni Branch: resource_warning Changeset: r83561:a2e023d51866 Date: 2016-04-07 11:10 +0200 http://bitbucket.org/pypy/pypy/changeset/a2e023d51866/ Log: (antocuni, florinpapa): make the test even better diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ 
b/pypy/module/_file/test/test_file.py @@ -271,8 +271,10 @@ import re if '__pypy__' not in sys.builtin_module_names: skip("pypy specific test") - def fn(): + def fn(flag1, flag2): + sys.pypy_set_resource_warning(flag1) f = self.file(self.temppath, 'w') + sys.pypy_set_resource_warning(flag2) g = cStringIO.StringIO() preverr = sys.stderr try: @@ -281,22 +283,27 @@ gc.collect() # force __del__ to be called finally: sys.stderr = preverr + sys.pypy_set_resource_warning(False) return g.getvalue() - try: - sys.pypy_set_resource_warning(False) - assert fn() == "" - sys.pypy_set_resource_warning(True) - msg = fn() - assert self.regex_search(r""" - WARNING: unclosed file: - Created at \(most recent call last\): - File ".*", line .*, in test_resource_warning - File ".*", line .*, in fn - File ".*", line .*, in anonymous - """, msg) - finally: - sys.pypy_set_resource_warning(False) + # check with resource_warning disabled + assert fn(False, False) == "" + # + # check with resource_warning enabled + msg = fn(True, True) + assert self.regex_search(r""" + WARNING: unclosed file: + Created at \(most recent call last\): + File ".*", line .*, in test_resource_warning + File ".*", line .*, in fn + File ".*", line .*, in anonymous + """, msg) + # + # check with resource_warning enabled in the destructor BUT with a + # file which was created when resource_warning was disabled + msg = fn(False, True) + assert self.regex_search("WARNING: unclosed file: ", msg) + assert "Created at" not in msg def test_truncate(self): f = self.file(self.temppath, "w") From pypy.commits at gmail.com Thu Apr 7 05:21:40 2016 From: pypy.commits at gmail.com (JohnDoe) Date: Thu, 07 Apr 2016 02:21:40 -0700 (PDT) Subject: [pypy-commit] pypy get-heap-stats: (fijal, catalin) - started working on better way to dump heap stats Message-ID: <570626a4.4577c20a.1ee06.ffff8e36@mx.google.com> Author: JohnDoe Branch: get-heap-stats Changeset: r83562:f794f644d3e2 Date: 2016-04-07 12:10 +0300 
http://bitbucket.org/pypy/pypy/changeset/f794f644d3e2/ Log: (fijal, catalin) - started working on better way to dump heap stats diff --git a/rpython/memory/gc/inspector.py b/rpython/memory/gc/inspector.py --- a/rpython/memory/gc/inspector.py +++ b/rpython/memory/gc/inspector.py @@ -219,6 +219,29 @@ while pending.non_empty(): self.unwriteobj(pending.pop()) +class HeapAggregator(object): + TYPEID_MAP = lltype.Struct('TYPEID_MAP', ('count', lltype.Signed), + ('size', lltype.Signed)) + + + ARRAY_TYPEID_MAP = lltype.GcArray(TYPEID_MAP) + + def __init__(self, gc, maxtypeid): + self.gc = gc + self.aggregated = lltype.malloc(self.ARRAY_TYPEID_MAP, maxtypeid) + + def writeobj(self, obj): + super(HeapDumper, self).writeobj(obj) + typeid = self.gc.get_type_id(obj) + objsize = self.gc.get_size_incl_hash(obj) + if typeid in self.aggregated: + self.aggregated[typeid][0] += objsize + self.aggregated[typeid][1] += 1 + else: + self.aggregated[typeid] = [0, 0] + self.aggregated[typeid][0] = objsize + self.aggregated[typeid][1] = 1 + def _hd_add_root(obj, heap_dumper): heap_dumper.add(obj) @@ -236,6 +259,13 @@ heapdumper.delete() return True +def get_heap_stats(gc): + heapaggreg = HeapAggregator(gc, 200) + heapaggreg.add_roots() + heapaggreg.walk(heapaggreg.pending) + + return heapaggreg.getdata() + def get_typeids_z(gc): srcaddress = gc.root_walker.gcdata.typeids_z return llmemory.cast_adr_to_ptr(srcaddress, lltype.Ptr(rgc.ARRAY_OF_CHAR)) diff --git a/rpython/memory/gc/test/test_inspector.py b/rpython/memory/gc/test/test_inspector.py --- a/rpython/memory/gc/test/test_inspector.py +++ b/rpython/memory/gc/test/test_inspector.py @@ -37,6 +37,15 @@ adr_q, 1, ASize(), -1] assert expected == seen + def test_get_heap_stats(self): + p = self.malloc(S) + p.x = 5 + q = self.malloc(S) + q.x = 6 + self.write(p, 'next', q) + self.stackroots.append(p) + # + inspector.H class TestHybridGC(InspectorTest): from rpython.memory.gc.hybrid import HybridGC as GCClass diff --git 
a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -1587,6 +1587,16 @@ class TestIncrementalMiniMarkGC(TestMiniMarkGC): gcpolicy = "incminimark" + def define_dump_rpy_stats(self): + def fn(): + return 0 + + return fn + + def test_dump_rpy_stats(self): + res = self.run("dump_rpy_stats") + assert res == 0 + def define_random_pin(self): class A: foo = None From pypy.commits at gmail.com Thu Apr 7 06:41:25 2016 From: pypy.commits at gmail.com (Raemi) Date: Thu, 07 Apr 2016 03:41:25 -0700 (PDT) Subject: [pypy-commit] stmgc default: avoid data race with vmprof where *during* an abort a SIGPROF signal gets Message-ID: <57063955.c856c20a.5399f.ffffb2f7@mx.google.com> Author: Remi Meier Branch: Changeset: r1986:5e3551b4e599 Date: 2016-04-07 13:41 +0300 http://bitbucket.org/pypy/stmgc/changeset/5e3551b4e599/ Log: avoid data race with vmprof where *during* an abort a SIGPROF signal gets handled and we already reset the thread-local. 
diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -1063,7 +1063,7 @@ } #ifdef STM_NO_AUTOMATIC_SETJMP -static int did_abort = 0; +int did_abort = 0; #endif long _stm_start_transaction(stm_thread_local_t *tl) @@ -1075,6 +1075,12 @@ #else long repeat_count = stm_rewind_jmp_setjmp(tl); #endif + if (repeat_count) { + /* only if there was an abort, we need to reset the memory: */ + if (tl->mem_reset_on_abort) + memcpy(tl->mem_reset_on_abort, tl->mem_stored_for_reset_on_abort, + tl->mem_bytes_to_reset_on_abort); + } _do_start_transaction(tl); if (repeat_count == 0) { /* else, 'nursery_mark' was already set @@ -1439,9 +1445,13 @@ if (tl->mem_clear_on_abort) memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); - if (tl->mem_reset_on_abort) - memcpy(tl->mem_reset_on_abort, tl->mem_stored_for_reset_on_abort, - tl->mem_bytes_to_reset_on_abort); + if (tl->mem_reset_on_abort) { + /* temporarily set the memory of mem_reset_on_abort to zeros since in the + case of vmprof, the old value is really wrong if we didn't do the longjmp + back yet (that restores the C stack). 
We restore the memory in + _stm_start_transaction() */ + memset(tl->mem_reset_on_abort, 0, tl->mem_bytes_to_reset_on_abort); + } invoke_and_clear_user_callbacks(1); /* for abort */ diff --git a/c8/stm/setup.c b/c8/stm/setup.c --- a/c8/stm/setup.c +++ b/c8/stm/setup.c @@ -69,6 +69,10 @@ commit_log_root.rev_num = 0; commit_log_root.written_count = 0; +#ifdef STM_NO_AUTOMATIC_SETJMP + did_abort = 0; +#endif + long i; /* including seg0 */ for (i = 0; i < NB_SEGMENTS; i++) { diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -117,6 +117,11 @@ object_t *_stm_allocate_old(ssize_t size_rounded_up); char *_stm_real_address(object_t *o); #ifdef STM_TESTS + +#ifdef STM_NO_AUTOMATIC_SETJMP +extern int did_abort; +#endif + #include uint8_t _stm_get_transaction_read_version(void); uint8_t _stm_get_card_value(object_t *obj, long idx); diff --git a/c8/test/test_extra.py b/c8/test/test_extra.py --- a/c8/test/test_extra.py +++ b/c8/test/test_extra.py @@ -50,6 +50,8 @@ p[1] = 'a' p[4] = 'i' self.abort_transaction() + assert p[0] == '\0' # impl detail + self.start_transaction() assert ffi.string(p) == "welli" def test_call_on_abort(self): From pypy.commits at gmail.com Thu Apr 7 06:55:56 2016 From: pypy.commits at gmail.com (Raemi) Date: Thu, 07 Apr 2016 03:55:56 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: import stmgc Message-ID: <57063cbc.0113c20a.8b306.ffffb6fa@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r83563:be250c24a526 Date: 2016-04-07 13:43 +0300 http://bitbucket.org/pypy/pypy/changeset/be250c24a526/ Log: import stmgc diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -ff3079618aaf +5e3551b4e599 diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ 
b/rpython/translator/stm/src_stm/stm/core.c @@ -1063,7 +1063,7 @@ } #ifdef STM_NO_AUTOMATIC_SETJMP -static int did_abort = 0; +int did_abort = 0; #endif long _stm_start_transaction(stm_thread_local_t *tl) @@ -1075,6 +1075,12 @@ #else long repeat_count = stm_rewind_jmp_setjmp(tl); #endif + if (repeat_count) { + /* only if there was an abort, we need to reset the memory: */ + if (tl->mem_reset_on_abort) + memcpy(tl->mem_reset_on_abort, tl->mem_stored_for_reset_on_abort, + tl->mem_bytes_to_reset_on_abort); + } _do_start_transaction(tl); if (repeat_count == 0) { /* else, 'nursery_mark' was already set @@ -1439,9 +1445,13 @@ if (tl->mem_clear_on_abort) memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); - if (tl->mem_reset_on_abort) - memcpy(tl->mem_reset_on_abort, tl->mem_stored_for_reset_on_abort, - tl->mem_bytes_to_reset_on_abort); + if (tl->mem_reset_on_abort) { + /* temporarily set the memory of mem_reset_on_abort to zeros since in the + case of vmprof, the old value is really wrong if we didn't do the longjmp + back yet (that restores the C stack). 
We restore the memory in + _stm_start_transaction() */ + memset(tl->mem_reset_on_abort, 0, tl->mem_bytes_to_reset_on_abort); + } invoke_and_clear_user_callbacks(1); /* for abort */ diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -69,6 +69,10 @@ commit_log_root.rev_num = 0; commit_log_root.written_count = 0; +#ifdef STM_NO_AUTOMATIC_SETJMP + did_abort = 0; +#endif + long i; /* including seg0 */ for (i = 0; i < NB_SEGMENTS; i++) { diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -117,6 +117,11 @@ object_t *_stm_allocate_old(ssize_t size_rounded_up); char *_stm_real_address(object_t *o); #ifdef STM_TESTS + +#ifdef STM_NO_AUTOMATIC_SETJMP +extern int did_abort; +#endif + #include uint8_t _stm_get_transaction_read_version(void); uint8_t _stm_get_card_value(object_t *obj, long idx); From pypy.commits at gmail.com Thu Apr 7 06:55:58 2016 From: pypy.commits at gmail.com (Raemi) Date: Thu, 07 Apr 2016 03:55:58 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: ignore signal if we are in the process of aborting a tx in this thread Message-ID: <57063cbe.89cbc20a.418ed.ffffbeb2@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r83564:3d9408e5409c Date: 2016-04-07 13:55 +0300 http://bitbucket.org/pypy/pypy/changeset/3d9408e5409c/ Log: ignore signal if we are in the process of aborting a tx in this thread diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -146,26 +146,29 @@ int fd = profile_file; assert(fd >= 0); - struct profbuf_s *p = reserve_buffer(fd); - if (p == NULL) { - /* ignore this signal: there are no free buffers right now */ + 
vmprof_stack_t *stack = get_vmprof_stack(); + /* for STM: check that we are not currently aborting this transaction: */ + if (stack != NULL) { + struct profbuf_s *p = reserve_buffer(fd); + if (p == NULL) { + /* ignore this signal: there are no free buffers right now */ + } + else { + int depth; + struct prof_stacktrace_s *st = (struct prof_stacktrace_s *)p->data; + st->marker = MARKER_STACKTRACE; + st->count = 1; + depth = get_stack_trace(stack, st->stack, + MAX_STACK_DEPTH-2, GetPC((ucontext_t*)ucontext)); + st->depth = depth; + st->stack[depth++] = get_current_thread_id(); + p->data_offset = offsetof(struct prof_stacktrace_s, marker); + p->data_size = (depth * sizeof(void *) + + sizeof(struct prof_stacktrace_s) - + offsetof(struct prof_stacktrace_s, marker)); + commit_buffer(fd, p); + } } - else { - int depth; - struct prof_stacktrace_s *st = (struct prof_stacktrace_s *)p->data; - st->marker = MARKER_STACKTRACE; - st->count = 1; - depth = get_stack_trace(get_vmprof_stack(), st->stack, - MAX_STACK_DEPTH-2, GetPC((ucontext_t*)ucontext)); - st->depth = depth; - st->stack[depth++] = get_current_thread_id(); - p->data_offset = offsetof(struct prof_stacktrace_s, marker); - p->data_size = (depth * sizeof(void *) + - sizeof(struct prof_stacktrace_s) - - offsetof(struct prof_stacktrace_s, marker)); - commit_buffer(fd, p); - } - errno = saved_errno; } From pypy.commits at gmail.com Thu Apr 7 08:57:43 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Apr 2016 05:57:43 -0700 (PDT) Subject: [pypy-commit] pypy vtune: starting Message-ID: <57065947.82bb1c0a.d1b42.0ee5@mx.google.com> Author: Armin Rigo Branch: vtune Changeset: r83565:82b4ebcd5392 Date: 2016-04-07 15:55 +0300 http://bitbucket.org/pypy/pypy/changeset/82b4ebcd5392/ Log: starting diff --git a/rpython/jit/backend/x86/vtune.py b/rpython/jit/backend/x86/vtune.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/x86/vtune.py @@ -0,0 +1,42 @@ +""" +Support for VTune Amplifier +""" + +from 
rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo + + +eci = ExternalCompilationInfo( + post_include_bits=[""" +RPY_EXTERN void rpy_vtune_register(char *, long, long); +"""], + include_dirs=["/opt/intel/vtune_amplifier_xe/include"], + separate_module_sources=[""" +#include "/opt/intel/vtune_amplifier_xe/sdk/src/ittnotify/jitprofiling.c" + +RPY_EXTERN void rpy_vtune_register(char *funcname, Signed addr, Signed length) +{ + iJIT_Method_Load_V2 jmethod = {0}; + + if (iJIT_IsProfilingActive() != iJIT_SAMPLING_ON) { + return; + } + + jmethod.method_id = iJIT_GetNewMethodID(); + jmethod.method_name = funcname; + jmethod.method_load_address = (void *)addr; + jmethod.method_size = length; + jmethod.module_name = "rpython_jit"; + + iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED_V2, + (void*)&jmethod); +} +"""]) + +rpy_vtune_register = rffi.llexternal( + "rpy_vtune_register", + [rffi.CCHARP, lltype.Signed, lltype.Signed], + lltype.Void, + compilation_info=eci, + _nowrapper=True, + sandboxsafe=True) From pypy.commits at gmail.com Thu Apr 7 09:32:07 2016 From: pypy.commits at gmail.com (florinpapa) Date: Thu, 07 Apr 2016 06:32:07 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: (florin, antocuni) Rename resource_warning to track_resources Message-ID: <57066157.6718c20a.eeb2e.fffff786@mx.google.com> Author: Florin Papa Branch: resource_warning Changeset: r83566:c7b7a1ea6010 Date: 2016-04-07 16:31 +0300 http://bitbucket.org/pypy/pypy/changeset/c7b7a1ea6010/ Log: (florin, antocuni) Rename resource_warning to track_resources diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -44,7 +44,7 @@ def __init__(self, space): self.space = space - if self.space.sys.resource_warning_enabled: + if self.space.sys.track_resources: self.w_tb = self.space.format_traceback() def __del__(self): @@ -57,7 +57,7 
@@ def destructor(self): assert isinstance(self, W_File) - if self.space.sys.resource_warning_enabled: + if self.space.sys.track_resources: w_repr = self.space.repr(self) str_repr = self.space.str_w(w_repr) w_msg = self.space.wrap("WARNING: unclosed file: " + str_repr) diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -266,15 +266,15 @@ if '__pypy__' in sys.builtin_module_names: assert repr(self.temppath) in g.getvalue() - def test_resource_warning(self): + def test_track_resources(self): import os, gc, sys, cStringIO import re if '__pypy__' not in sys.builtin_module_names: skip("pypy specific test") def fn(flag1, flag2): - sys.pypy_set_resource_warning(flag1) + sys.pypy_set_track_resources(flag1) f = self.file(self.temppath, 'w') - sys.pypy_set_resource_warning(flag2) + sys.pypy_set_track_resources(flag2) g = cStringIO.StringIO() preverr = sys.stderr try: @@ -283,24 +283,24 @@ gc.collect() # force __del__ to be called finally: sys.stderr = preverr - sys.pypy_set_resource_warning(False) + sys.pypy_set_track_resources(False) return g.getvalue() - # check with resource_warning disabled + # check with track_resources disabled assert fn(False, False) == "" # - # check with resource_warning enabled + # check with track_resources enabled msg = fn(True, True) assert self.regex_search(r""" WARNING: unclosed file: Created at \(most recent call last\): - File ".*", line .*, in test_resource_warning + File ".*", line .*, in test_track_resources File ".*", line .*, in fn File ".*", line .*, in anonymous """, msg) # - # check with resource_warning enabled in the destructor BUT with a - # file which was created when resource_warning was disabled + # check with track_resources enabled in the destructor BUT with a + # file which was created when track_resources was disabled msg = fn(False, True) assert self.regex_search("WARNING: unclosed file: ", msg) assert 
"Created at" not in msg diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -19,7 +19,7 @@ self.defaultencoding = "ascii" self.filesystemencoding = None self.debug = True - self.resource_warning_enabled = False + self.track_resources = False interpleveldefs = { '__name__' : '(space.wrap("sys"))', @@ -54,7 +54,7 @@ '_current_frames' : 'currentframes._current_frames', 'setrecursionlimit' : 'vm.setrecursionlimit', 'getrecursionlimit' : 'vm.getrecursionlimit', - 'pypy_set_resource_warning' : 'vm.set_resource_warning', + 'pypy_set_track_resources' : 'vm.set_track_resources', 'setcheckinterval' : 'vm.setcheckinterval', 'getcheckinterval' : 'vm.getcheckinterval', 'exc_info' : 'vm.exc_info', diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -65,8 +65,8 @@ return space.wrap(space.sys.recursionlimit) @unwrap_spec(flag=bool) -def set_resource_warning(space, flag): - space.sys.resource_warning_enabled = flag +def set_track_resources(space, flag): + space.sys.track_resources = flag @unwrap_spec(interval=int) def setcheckinterval(space, interval): From pypy.commits at gmail.com Thu Apr 7 10:10:55 2016 From: pypy.commits at gmail.com (antocuni) Date: Thu, 07 Apr 2016 07:10:55 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: don't import re, it's not needed Message-ID: <57066a6f.c11a1c0a.b31e5.7e0a@mx.google.com> Author: Antonio Cuni Branch: resource_warning Changeset: r83567:17a9af6e11a3 Date: 2016-04-07 15:46 +0200 http://bitbucket.org/pypy/pypy/changeset/17a9af6e11a3/ Log: don't import re, it's not needed diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -268,7 +268,6 @@ def test_track_resources(self): import os, gc, sys, cStringIO - import re if '__pypy__' not in sys.builtin_module_names: 
skip("pypy specific test") def fn(flag1, flag2): From pypy.commits at gmail.com Thu Apr 7 10:10:57 2016 From: pypy.commits at gmail.com (antocuni) Date: Thu, 07 Apr 2016 07:10:57 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: enable resource-tracking for socket objects Message-ID: <57066a71.d3301c0a.a814a.34fd@mx.google.com> Author: Antonio Cuni Branch: resource_warning Changeset: r83568:777b0214d3a8 Date: 2016-04-07 16:08 +0200 http://bitbucket.org/pypy/pypy/changeset/777b0214d3a8/ Log: enable resource-tracking for socket objects diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -1044,4 +1044,3 @@ # assert it did not crash finally: sys.path[:] = old_sys_path - diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -11,6 +11,16 @@ return file """) +# the following function is used e.g. in test_resource_warning + at unwrap_spec(regex=str, s=str) +def regex_search(space, regex, s): + import re + import textwrap + regex = textwrap.dedent(regex).strip() + m = re.search(regex, s) + m = bool(m) + return space.wrap(m) + class AppTestFile(object): spaceconfig = dict(usemodules=("_file",)) @@ -18,16 +28,6 @@ cls.w_temppath = cls.space.wrap( str(py.test.ensuretemp("fileimpl").join("foo.txt"))) cls.w_file = getfile(cls.space) - # - # the following function is used e.g. 
in test_resource_warning - @unwrap_spec(regex=str, s=str) - def regex_search(space, regex, s): - import re - import textwrap - regex = textwrap.dedent(regex).strip() - m = re.search(regex, s) - m = bool(m) - return space.wrap(m) cls.w_regex_search = cls.space.wrap(interp2app(regex_search)) def test_simple(self): diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -154,9 +154,28 @@ class W_Socket(W_Root): + w_tb = None # String representation of the traceback at creation time + def __init__(self, space, sock): + self.space = space self.sock = sock register_socket(space, sock) + if self.space.sys.track_resources: + self.w_tb = self.space.format_traceback() + + def __del__(self): + is_open = self.sock.fd >= 0 + if is_open and self.space.sys.track_resources: + self.enqueue_for_destruction(self.space, W_Socket.destructor, + '__del__ method of ') + + def destructor(self): + assert isinstance(self, W_Socket) + if self.space.sys.track_resources: + w_repr = self.space.repr(self) + str_repr = self.space.str_w(w_repr) + w_msg = self.space.wrap("WARNING: unclosed " + str_repr) + self.space.resource_warning(w_msg, self.w_tb) def get_type_w(self, space): return space.wrap(self.sock.type) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -1,6 +1,8 @@ import sys, os import py from pypy.tool.pytest.objspace import gettestobjspace +from pypy.interpreter.gateway import interp2app +from pypy.module._file.test.test_file import regex_search from rpython.tool.udir import udir from rpython.rlib import rsocket from rpython.rtyper.lltypesystem import lltype, rffi @@ -314,6 +316,7 @@ def setup_class(cls): cls.space = space cls.w_udir = space.wrap(str(udir)) + cls.w_regex_search = space.wrap(interp2app(regex_search)) def 
teardown_class(cls): if not cls.runappdirect: @@ -402,6 +405,64 @@ if os.name != 'nt': raises(OSError, os.close, fileno) + def test_socket_track_resources(self): + import _socket, os, gc, sys, cStringIO + s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) + fileno = s.fileno() + assert s.fileno() >= 0 + s.close() + assert s.fileno() < 0 + s.close() + if os.name != 'nt': + raises(OSError, os.close, fileno) + + def test_track_resources(self): + import os, gc, sys, cStringIO + import _socket + if '__pypy__' not in sys.builtin_module_names: + skip("pypy specific test") + # + def fn(flag1, flag2, do_close=False): + sys.pypy_set_track_resources(flag1) + mysock = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) + sys.pypy_set_track_resources(flag2) + buf = cStringIO.StringIO() + preverr = sys.stderr + try: + sys.stderr = buf + if do_close: + mysock.close() + del mysock + gc.collect() # force __del__ to be called + finally: + sys.stderr = preverr + sys.pypy_set_track_resources(False) + return buf.getvalue() + + # check with track_resources disabled + assert fn(False, False) == "" + # + # check that we don't get the warning if we actually closed the socket + msg = fn(True, True, do_close=True) + assert msg == '' + # + # check with track_resources enabled + msg = fn(True, True) + assert self.regex_search(r""" + WARNING: unclosed + Created at \(most recent call last\): + File ".*", line .*, in test_track_resources + File ".*", line .*, in fn + File ".*", line .*, in anonymous + """, msg) + # + # check with track_resources enabled in the destructor BUT with a + # file which was created when track_resources was disabled + msg = fn(False, True) + assert self.regex_search("WARNING: unclosed ", msg) + assert "Created at" not in msg + + def test_socket_close_error(self): import _socket, os if os.name == 'nt': From pypy.commits at gmail.com Thu Apr 7 10:10:59 2016 From: pypy.commits at gmail.com (antocuni) Date: Thu, 07 Apr 2016 07:10:59 -0700 (PDT) Subject: 
[pypy-commit] pypy resource_warning: improve the test and check that we don't get the warning if we explicitly close the file Message-ID: <57066a73.4a231c0a.a405e.2800@mx.google.com> Author: Antonio Cuni Branch: resource_warning Changeset: r83569:3649155837b6 Date: 2016-04-07 16:09 +0200 http://bitbucket.org/pypy/pypy/changeset/3649155837b6/ Log: improve the test and check that we don't get the warning if we explicitly close the file diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -270,24 +270,29 @@ import os, gc, sys, cStringIO if '__pypy__' not in sys.builtin_module_names: skip("pypy specific test") - def fn(flag1, flag2): + def fn(flag1, flag2, do_close=False): sys.pypy_set_track_resources(flag1) f = self.file(self.temppath, 'w') sys.pypy_set_track_resources(flag2) - g = cStringIO.StringIO() + buf = cStringIO.StringIO() preverr = sys.stderr try: - sys.stderr = g + sys.stderr = buf + if do_close: + f.close() del f gc.collect() # force __del__ to be called finally: sys.stderr = preverr sys.pypy_set_track_resources(False) - return g.getvalue() + return buf.getvalue() # check with track_resources disabled assert fn(False, False) == "" # + # check that we don't get the warning if we actually close the file + assert fn(False, False, do_close=True) == "" + # # check with track_resources enabled msg = fn(True, True) assert self.regex_search(r""" From pypy.commits at gmail.com Thu Apr 7 11:01:38 2016 From: pypy.commits at gmail.com (florinpapa) Date: Thu, 07 Apr 2016 08:01:38 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: (florin, antocuni) Add get_track_resources to retrieve flag value Message-ID: <57067652.839a1c0a.5d8d0.3f57@mx.google.com> Author: Florin Papa Branch: resource_warning Changeset: r83570:5a210502cd24 Date: 2016-04-07 17:59 +0300 http://bitbucket.org/pypy/pypy/changeset/5a210502cd24/ Log: (florin, antocuni) Add 
get_track_resources to retrieve flag value diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -55,6 +55,7 @@ 'setrecursionlimit' : 'vm.setrecursionlimit', 'getrecursionlimit' : 'vm.getrecursionlimit', 'pypy_set_track_resources' : 'vm.set_track_resources', + 'pypy_get_track_resources' : 'vm.get_track_resources', 'setcheckinterval' : 'vm.setcheckinterval', 'getcheckinterval' : 'vm.getcheckinterval', 'exc_info' : 'vm.exc_info', diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -68,6 +68,9 @@ def set_track_resources(space, flag): space.sys.track_resources = flag +def get_track_resources(space): + return space.wrap(space.sys.track_resources) + @unwrap_spec(interval=int) def setcheckinterval(space, interval): """Tell the Python interpreter to check for asynchronous events every From pypy.commits at gmail.com Thu Apr 7 11:01:40 2016 From: pypy.commits at gmail.com (florinpapa) Date: Thu, 07 Apr 2016 08:01:40 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: (florin, antocuni) Add -X track-resources runtime flag to the interpreter Message-ID: <57067654.6718c20a.eeb2e.1e50@mx.google.com> Author: Florin Papa Branch: resource_warning Changeset: r83571:5ee1622695e2 Date: 2016-04-07 18:00 +0300 http://bitbucket.org/pypy/pypy/changeset/5ee1622695e2/ Log: (florin, antocuni) Add -X track-resources runtime flag to the interpreter diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -22,11 +22,14 @@ -V : print the Python version number and exit (also --version) -W arg : warning control; arg is action:message:category:module:lineno also PYTHONWARNINGS=arg +-X arg : set implementation-specific option file : program read from script file - : program read from stdin (default; interactive mode if a tty) arg ...: arguments passed to 
program in sys.argv[1:] PyPy options and arguments: --info : print translation information about this PyPy executable +-X track-resources : track the creation of files and sockets and display + a warning if they are not closed explicitly """ # Missing vs CPython: PYTHONHOME, PYTHONCASEOK USAGE2 = """ @@ -223,6 +226,15 @@ import pypyjit pypyjit.set_param(jitparam) +def set_runtime_options(options, Xparam, *args): + if Xparam == 'track-resources': + sys.pypy_set_track_resources(True) + else: + print >> sys.stderr + print >> sys.stderr, 'usage: %s -X [options]' % (get_sys_executable(),) + print >> sys.stderr, '[options] can be: track-resources' + print >> sys.stderr + class CommandLineError(Exception): pass @@ -398,6 +410,7 @@ '--info': (print_info, None), '--jit': (set_jit_option, Ellipsis), '-funroll-loops': (funroll_loops, None), + '-X': (set_runtime_options, Ellipsis), '--': (end_options, None), } diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -215,6 +215,13 @@ expected = {"no_user_site": True} self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass', **expected) + def test_track_resources(self, monkeypatch): + myflag = [False] + def pypy_set_track_resources(flag): + myflag[0] = flag + monkeypatch.setattr(sys, 'pypy_set_track_resources', pypy_set_track_resources, raising=False) + self.check(['-X', 'track-resources'], {}, sys_argv=[''], run_stdin=True) + assert myflag[0] == True class TestInteraction: """ @@ -1044,4 +1051,3 @@ # assert it did not crash finally: sys.path[:] = old_sys_path - From pypy.commits at gmail.com Thu Apr 7 11:01:42 2016 From: pypy.commits at gmail.com (florinpapa) Date: Thu, 07 Apr 2016 08:01:42 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: Merge heads Message-ID: <57067656.e5ecc20a.4afe2.1d75@mx.google.com> Author: Florin Papa Branch: resource_warning Changeset: r83572:8aa029604879 
Date: 2016-04-07 18:00 +0300 http://bitbucket.org/pypy/pypy/changeset/8aa029604879/ Log: Merge heads diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -11,6 +11,16 @@ return file """) +# the following function is used e.g. in test_resource_warning + at unwrap_spec(regex=str, s=str) +def regex_search(space, regex, s): + import re + import textwrap + regex = textwrap.dedent(regex).strip() + m = re.search(regex, s) + m = bool(m) + return space.wrap(m) + class AppTestFile(object): spaceconfig = dict(usemodules=("_file",)) @@ -18,16 +28,6 @@ cls.w_temppath = cls.space.wrap( str(py.test.ensuretemp("fileimpl").join("foo.txt"))) cls.w_file = getfile(cls.space) - # - # the following function is used e.g. in test_resource_warning - @unwrap_spec(regex=str, s=str) - def regex_search(space, regex, s): - import re - import textwrap - regex = textwrap.dedent(regex).strip() - m = re.search(regex, s) - m = bool(m) - return space.wrap(m) cls.w_regex_search = cls.space.wrap(interp2app(regex_search)) def test_simple(self): @@ -268,27 +268,31 @@ def test_track_resources(self): import os, gc, sys, cStringIO - import re if '__pypy__' not in sys.builtin_module_names: skip("pypy specific test") - def fn(flag1, flag2): + def fn(flag1, flag2, do_close=False): sys.pypy_set_track_resources(flag1) f = self.file(self.temppath, 'w') sys.pypy_set_track_resources(flag2) - g = cStringIO.StringIO() + buf = cStringIO.StringIO() preverr = sys.stderr try: - sys.stderr = g + sys.stderr = buf + if do_close: + f.close() del f gc.collect() # force __del__ to be called finally: sys.stderr = preverr sys.pypy_set_track_resources(False) - return g.getvalue() + return buf.getvalue() # check with track_resources disabled assert fn(False, False) == "" # + # check that we don't get the warning if we actually close the file + assert fn(False, False, do_close=True) == "" + # # check with 
track_resources enabled msg = fn(True, True) assert self.regex_search(r""" diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -154,9 +154,28 @@ class W_Socket(W_Root): + w_tb = None # String representation of the traceback at creation time + def __init__(self, space, sock): + self.space = space self.sock = sock register_socket(space, sock) + if self.space.sys.track_resources: + self.w_tb = self.space.format_traceback() + + def __del__(self): + is_open = self.sock.fd >= 0 + if is_open and self.space.sys.track_resources: + self.enqueue_for_destruction(self.space, W_Socket.destructor, + '__del__ method of ') + + def destructor(self): + assert isinstance(self, W_Socket) + if self.space.sys.track_resources: + w_repr = self.space.repr(self) + str_repr = self.space.str_w(w_repr) + w_msg = self.space.wrap("WARNING: unclosed " + str_repr) + self.space.resource_warning(w_msg, self.w_tb) def get_type_w(self, space): return space.wrap(self.sock.type) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -1,6 +1,8 @@ import sys, os import py from pypy.tool.pytest.objspace import gettestobjspace +from pypy.interpreter.gateway import interp2app +from pypy.module._file.test.test_file import regex_search from rpython.tool.udir import udir from rpython.rlib import rsocket from rpython.rtyper.lltypesystem import lltype, rffi @@ -314,6 +316,7 @@ def setup_class(cls): cls.space = space cls.w_udir = space.wrap(str(udir)) + cls.w_regex_search = space.wrap(interp2app(regex_search)) def teardown_class(cls): if not cls.runappdirect: @@ -402,6 +405,64 @@ if os.name != 'nt': raises(OSError, os.close, fileno) + def test_socket_track_resources(self): + import _socket, os, gc, sys, cStringIO + s = _socket.socket(_socket.AF_INET, 
_socket.SOCK_STREAM, 0) + fileno = s.fileno() + assert s.fileno() >= 0 + s.close() + assert s.fileno() < 0 + s.close() + if os.name != 'nt': + raises(OSError, os.close, fileno) + + def test_track_resources(self): + import os, gc, sys, cStringIO + import _socket + if '__pypy__' not in sys.builtin_module_names: + skip("pypy specific test") + # + def fn(flag1, flag2, do_close=False): + sys.pypy_set_track_resources(flag1) + mysock = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0) + sys.pypy_set_track_resources(flag2) + buf = cStringIO.StringIO() + preverr = sys.stderr + try: + sys.stderr = buf + if do_close: + mysock.close() + del mysock + gc.collect() # force __del__ to be called + finally: + sys.stderr = preverr + sys.pypy_set_track_resources(False) + return buf.getvalue() + + # check with track_resources disabled + assert fn(False, False) == "" + # + # check that we don't get the warning if we actually closed the socket + msg = fn(True, True, do_close=True) + assert msg == '' + # + # check with track_resources enabled + msg = fn(True, True) + assert self.regex_search(r""" + WARNING: unclosed + Created at \(most recent call last\): + File ".*", line .*, in test_track_resources + File ".*", line .*, in fn + File ".*", line .*, in anonymous + """, msg) + # + # check with track_resources enabled in the destructor BUT with a + # file which was created when track_resources was disabled + msg = fn(False, True) + assert self.regex_search("WARNING: unclosed ", msg) + assert "Created at" not in msg + + def test_socket_close_error(self): import _socket, os if os.name == 'nt': From pypy.commits at gmail.com Thu Apr 7 11:52:39 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 07 Apr 2016 08:52:39 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: hg merge rposix-for-3 Message-ID: <57068247.838d1c0a.5e41.6322@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83573:3facdbc716c2 Date: 2016-04-07 15:56 +0100 
http://bitbucket.org/pypy/pypy/changeset/3facdbc716c2/ Log: hg merge rposix-for-3 diff --git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py --- a/rpython/rlib/rposix_stat.py +++ b/rpython/rlib/rposix_stat.py @@ -23,6 +23,7 @@ from rpython.rlib.rarithmetic import intmask from rpython.rlib.rposix import ( replace_os_function, handle_posix_error, _as_bytes0) +from rpython.rlib import rposix _WIN32 = sys.platform.startswith('win') _LINUX = sys.platform.startswith('linux') @@ -36,13 +37,12 @@ # sub-second timestamps. # - TIMESPEC is defined when the "struct stat" contains st_atim field. -if sys.platform.startswith('linux') or sys.platform.startswith('openbsd'): - TIMESPEC = platform.Struct('struct timespec', - [('tv_sec', rffi.TIME_T), - ('tv_nsec', rffi.LONG)]) -else: +try: + from rpython.rlib.rposix import TIMESPEC +except ImportError: TIMESPEC = None + # all possible fields - some of them are not available on all platforms ALL_STAT_FIELDS = [ ("st_mode", lltype.Signed), @@ -300,13 +300,6 @@ includes=INCLUDES ) -if TIMESPEC is not None: - class CConfig_for_timespec: - _compilation_info_ = compilation_info - TIMESPEC = TIMESPEC - TIMESPEC = lltype.Ptr( - platform.configure(CConfig_for_timespec)['TIMESPEC']) - def posix_declaration(try_to_add=None): global STAT_STRUCT, STATVFS_STRUCT @@ -322,7 +315,7 @@ if _name == originalname: # replace the 'st_atime' field of type rffi.DOUBLE # with a field 'st_atim' of type 'struct timespec' - lst[i] = (timespecname, TIMESPEC.TO) + lst[i] = (timespecname, TIMESPEC) break _expand(LL_STAT_FIELDS, 'st_atime', 'st_atim') @@ -512,6 +505,23 @@ path = traits.as_str0(path) return win32_xstat(traits, path, traverse=False) +if rposix.HAVE_FSTATAT: + from rpython.rlib.rposix import AT_FDCWD, AT_SYMLINK_NOFOLLOW + c_fstatat = rffi.llexternal('fstatat', + [rffi.INT, rffi.CCHARP, STAT_STRUCT, rffi.INT], rffi.INT, + compilation_info=compilation_info, + save_err=rffi.RFFI_SAVE_ERRNO, macro=True) + + def fstatat(pathname, 
dir_fd=AT_FDCWD, follow_symlinks=True): + if follow_symlinks: + flags = 0 + else: + flags = AT_SYMLINK_NOFOLLOW + with lltype.scoped_alloc(STAT_STRUCT.TO) as stresult: + error = c_fstatat(dir_fd, pathname, stresult, flags) + handle_posix_error('fstatat', error) + return build_stat_result(stresult) + @replace_os_function('fstatvfs') def fstatvfs(fd): with lltype.scoped_alloc(STATVFS_STRUCT.TO) as stresult: diff --git a/rpython/rlib/test/test_rposix_stat.py b/rpython/rlib/test/test_rposix_stat.py --- a/rpython/rlib/test/test_rposix_stat.py +++ b/rpython/rlib/test/test_rposix_stat.py @@ -56,3 +56,13 @@ except OSError, e: py.test.skip("the underlying os.fstatvfs() failed: %s" % e) rposix_stat.fstatvfs(0) + + at py.test.mark.skipif("not hasattr(rposix_stat, 'fstatat')") +def test_fstatat(tmpdir): + tmpdir.join('file').write('text') + dirfd = os.open(str(tmpdir), os.O_RDONLY) + try: + result = rposix_stat.fstatat('file', dir_fd=dirfd, follow_symlinks=False) + finally: + os.close(dirfd) + assert result.st_atime == tmpdir.join('file').atime() From pypy.commits at gmail.com Thu Apr 7 11:52:41 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 07 Apr 2016 08:52:41 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Add support for dir_fd and follow_symlinks in posix.stat() Message-ID: <57068249.03321c0a.45a15.2976@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83574:ff055a66c820 Date: 2016-04-07 16:51 +0100 http://bitbucket.org/pypy/pypy/changeset/ff055a66c820/ Log: Add support for dir_fd and follow_symlinks in posix.stat() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -354,7 +354,7 @@ else: return build_stat_result(space, st) - at unwrap_spec(dir_fd=DirFD(available=False), follow_symlinks=kwonly(bool)) + at unwrap_spec(dir_fd=DirFD(rposix.HAVE_FSTATAT), follow_symlinks=kwonly(bool)) def stat(space, w_path, 
dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """stat(path, *, dir_fd=None, follow_symlinks=True) -> stat result @@ -371,14 +371,29 @@ link points to. It is an error to use dir_fd or follow_symlinks when specifying path as an open file descriptor.""" - try: - st = dispatch_filename(rposix_stat.stat, 0, - allow_fd_fn=rposix_stat.fstat)(space, w_path) - except OSError, e: - raise wrap_oserror2(space, e, w_path) - else: + if follow_symlinks and dir_fd == DEFAULT_DIR_FD: + try: + st = dispatch_filename(rposix_stat.stat, 0, + allow_fd_fn=rposix_stat.fstat)(space, w_path) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + else: + return build_stat_result(space, st) + + if not follow_symlinks and dir_fd == DEFAULT_DIR_FD: + return lstat(space, w_path) + + if rposix.HAVE_FSTATAT: + try: + path = space.fsencode_w(w_path) + st = rposix_stat.fstatat(path, dir_fd, follow_symlinks) + except OSError as e: + raise wrap_oserror2(space, e, w_path) return build_stat_result(space, st) + raise oefmt(space.w_NotImplementedError, + "stat: unsupported argument combination") + @unwrap_spec(dir_fd=DirFD(available=False)) def lstat(space, w_path, dir_fd=DEFAULT_DIR_FD): """lstat(path, *, dir_fd=None) -> stat result From pypy.commits at gmail.com Thu Apr 7 11:59:17 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 07 Apr 2016 08:59:17 -0700 (PDT) Subject: [pypy-commit] pypy default: fix test Message-ID: <570683d5.d2b81c0a.1bb2a.5f7e@mx.google.com> Author: Ronan Lamy Branch: Changeset: r83575:e4e5f038ebbf Date: 2016-04-07 16:58 +0100 http://bitbucket.org/pypy/pypy/changeset/e4e5f038ebbf/ Log: fix test diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -104,7 +104,7 @@ rposix.mkdir(filename, 0) assert excinfo.value.errno == errno.EEXIST if sys.platform == 'win32': - assert exc.type is WindowsError + assert excinfo.type is WindowsError @rposix_requires('mkdirat') 
def test_mkdirat(self): From pypy.commits at gmail.com Thu Apr 7 12:11:11 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 07 Apr 2016 09:11:11 -0700 (PDT) Subject: [pypy-commit] pypy default: Skip some tests when the function under test is unavailable (e.g. on Windows) Message-ID: <5706869f.cf0b1c0a.6d98.6675@mx.google.com> Author: Ronan Lamy Branch: Changeset: r83576:56eb64398864 Date: 2016-04-07 17:10 +0100 http://bitbucket.org/pypy/pypy/changeset/56eb64398864/ Log: Skip some tests when the function under test is unavailable (e.g. on Windows) diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -488,6 +488,7 @@ os.close(dirfd) assert not os.path.exists(self.ufilename) + @rposix_requires('utimensat') def test_utimensat(self): def f(dirfd): return rposix.utimensat('test_open_ascii', @@ -499,6 +500,7 @@ finally: os.close(dirfd) + @rposix_requires('fchmodat') def test_fchmodat(self): def f(dirfd): return rposix.fchmodat('test_open_ascii', 0777, dirfd) @@ -529,6 +531,7 @@ compile(f, ()) + at rposix_requires('fdlistdir') def test_fdlistdir(tmpdir): tmpdir.join('file').write('text') dirfd = os.open(str(tmpdir), os.O_RDONLY) @@ -536,6 +539,7 @@ # Note: fdlistdir() always closes dirfd assert result == ['file'] + at rposix_requires('symlinkat') def test_symlinkat(tmpdir): tmpdir.join('file').write('text') dirfd = os.open(str(tmpdir), os.O_RDONLY) @@ -545,6 +549,7 @@ finally: os.close(dirfd) + at rposix_requires('renameat') def test_renameat(tmpdir): tmpdir.join('file').write('text') dirfd = os.open(str(tmpdir), os.O_RDONLY) From pypy.commits at gmail.com Thu Apr 7 16:19:09 2016 From: pypy.commits at gmail.com (antocuni) Date: Thu, 07 Apr 2016 13:19:09 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: failing test and corresponding fix Message-ID: <5706c0bd.89cbc20a.418ed.ffffa11a@mx.google.com> Author: Antonio Cuni Branch: resource_warning Changeset: 
r83577:9ba84d112325 Date: 2016-04-07 21:54 +0200 http://bitbucket.org/pypy/pypy/changeset/9ba84d112325/ Log: failing test and corresponding fix diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1751,11 +1751,20 @@ """) def format_traceback(self): - return self.appexec([], - """(): - import traceback - return "".join(traceback.format_stack()) - """) + # we need to disable track_resources before calling the traceback + # module. Else, it tries to open more files to format the traceback, + # the file constructor will call space.format_traceback etc., in an + # inifite recursion + flag = self.sys.track_resources + self.sys.track_resources = False + try: + return self.appexec([], + """(): + import traceback + return "".join(traceback.format_stack()) + """) + finally: + self.sys.track_resources = flag class AppExecCache(SpaceCache): diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -309,6 +309,27 @@ assert self.regex_search("WARNING: unclosed file: ", msg) assert "Created at" not in msg + def test_track_resources_dont_crash(self): + import os, gc, sys, cStringIO + if '__pypy__' not in sys.builtin_module_names: + skip("pypy specific test") + # + # try hard to create a code object whose co_filename points to an + # EXISTING file, so that traceback.py tries to open it when formatting + # the stacktrace + f = open(self.temppath, 'w') + f.close() + co = compile('open("%s")' % self.temppath, self.temppath, 'exec') + sys.pypy_set_track_resources(True) + try: + # this exec used to fail, because space.format_traceback tried to + # recurively open a file, causing an infinite recursion. 
For the + # purpose of this test, it is enough that it actually finishes + # without errors + exec co + finally: + sys.pypy_set_track_resources(False) + def test_truncate(self): f = self.file(self.temppath, "w") f.write("foo") From pypy.commits at gmail.com Thu Apr 7 16:19:11 2016 From: pypy.commits at gmail.com (antocuni) Date: Thu, 07 Apr 2016 13:19:11 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: don't show the 'anonymous' frame corresponding to the appexec when calling format_traceback Message-ID: <5706c0bf.070d1c0a.958c1.ffffc256@mx.google.com> Author: Antonio Cuni Branch: resource_warning Changeset: r83578:005ca768df30 Date: 2016-04-07 22:18 +0200 http://bitbucket.org/pypy/pypy/changeset/005ca768df30/ Log: don't show the 'anonymous' frame corresponding to the appexec when calling format_traceback diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1760,8 +1760,11 @@ try: return self.appexec([], """(): - import traceback - return "".join(traceback.format_stack()) + import sys, traceback + # the "1" is because we don't want to show THIS code + # object in the traceback + f = sys._getframe(1) + return "".join(traceback.format_stack(f)) """) finally: self.sys.track_resources = flag diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -427,3 +427,28 @@ space.finish() # assert that we reach this point without getting interrupted # by the OperationError(NameError) + + def test_format_traceback(self): + from pypy.tool.pytest.objspace import maketestobjspace + from pypy.interpreter.gateway import interp2app + # + def format_traceback(space): + return space.format_traceback() + # + space = maketestobjspace() + w_format_traceback = space.wrap(interp2app(format_traceback)) + w_tb = space.appexec([w_format_traceback], 
"""(format_traceback): + def foo(): + return bar() + def bar(): + return format_traceback() + return foo() + """) + tb = space.str_w(w_tb) + expected = '\n'.join([ + ' File "?", line 6, in anonymous', # this is the appexec code object + ' File "?", line 3, in foo', + ' File "?", line 5, in bar', + '' + ]) + assert tb == expected From pypy.commits at gmail.com Thu Apr 7 17:55:00 2016 From: pypy.commits at gmail.com (antocuni) Date: Thu, 07 Apr 2016 14:55:00 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: don't crash if we enable track_resources at the early startup (in particular, if we pass the -X track-resources option). It is a bit impossible to write an app-level test for it because we always have a bottom frame in that case Message-ID: <5706d734.c50a1c0a.a859d.009b@mx.google.com> Author: Antonio Cuni Branch: resource_warning Changeset: r83579:e1647e262594 Date: 2016-04-07 21:54 +0000 http://bitbucket.org/pypy/pypy/changeset/e1647e262594/ Log: don't crash if we enable track_resources at the early startup (in particular, if we pass the -X track-resources option). 
It is a bit impossible to write an app-level test for it because we always have a bottom frame in that case diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1763,7 +1763,12 @@ import sys, traceback # the "1" is because we don't want to show THIS code # object in the traceback - f = sys._getframe(1) + try: + f = sys._getframe(1) + except ValueError: + # this happens if you call format_traceback at the very beginning + # of startup, when there is no bottom code object + return '' return "".join(traceback.format_stack(f)) """) finally: From pypy.commits at gmail.com Thu Apr 7 17:55:48 2016 From: pypy.commits at gmail.com (antocuni) Date: Thu, 07 Apr 2016 14:55:48 -0700 (PDT) Subject: [pypy-commit] pypy resource_warning: exit if you don't specify the correct -X option; in theory you should continue and put it in sys._Xoptions, but since it's not implemented it's better to just exit for now Message-ID: <5706d764.91d31c0a.d66e.7269@mx.google.com> Author: Antonio Cuni Branch: resource_warning Changeset: r83580:5de70e252eef Date: 2016-04-07 23:00 +0200 http://bitbucket.org/pypy/pypy/changeset/5de70e252eef/ Log: exit if you don't specify the correct -X option; in theory you should continue and put it in sys._Xoptions, but since it's not implemented it's better to just exit for now diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -26,6 +26,7 @@ file : program read from script file - : program read from stdin (default; interactive mode if a tty) arg ...: arguments passed to program in sys.argv[1:] + PyPy options and arguments: --info : print translation information about this PyPy executable -X track-resources : track the creation of files and sockets and display @@ -230,10 +231,9 @@ if Xparam == 'track-resources': sys.pypy_set_track_resources(True) else: - print >> 
sys.stderr print >> sys.stderr, 'usage: %s -X [options]' % (get_sys_executable(),) print >> sys.stderr, '[options] can be: track-resources' - print >> sys.stderr + raise SystemExit class CommandLineError(Exception): pass From pypy.commits at gmail.com Fri Apr 8 03:49:54 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 08 Apr 2016 00:49:54 -0700 (PDT) Subject: [pypy-commit] pypy vtune: very, very, very minimal vtune binding Message-ID: <570762a2.8c8a1c0a.a1909.ffff82cb@mx.google.com> Author: Armin Rigo Branch: vtune Changeset: r83581:2ca9d92152f1 Date: 2016-04-08 10:48 +0300 http://bitbucket.org/pypy/pypy/changeset/2ca9d92152f1/ Log: very, very, very minimal vtune binding diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -164,7 +164,8 @@ self.pop_gcmap(mc) # cancel the push_gcmap(store=True) in the caller self._pop_all_regs_from_frame(mc, [], self.cpu.supports_floats) mc.RET() - self._frame_realloc_slowpath = mc.materialize(self.cpu, []) + self._frame_realloc_slowpath = self.materialize(mc, [], + "frame_realloc") def _build_cond_call_slowpath(self, supports_floats, callee_only): """ This builds a general call slowpath, for whatever call happens to @@ -200,7 +201,7 @@ self.pop_gcmap(mc) # cancel the push_gcmap(store=True) in the caller self._pop_all_regs_from_frame(mc, [], supports_floats, callee_only) mc.RET() - return mc.materialize(self.cpu, []) + return self.materialize(mc, [], "cond_call") def _build_malloc_slowpath(self, kind): """ While arriving on slowpath, we have a gcpattern on stack 0. 
@@ -287,7 +288,7 @@ mc.ADD_ri(esp.value, WORD) mc.JMP(imm(self.propagate_exception_path)) # - rawstart = mc.materialize(self.cpu, []) + rawstart = self.materialize(mc, [], "malloc") return rawstart def _build_propagate_exception_path(self): @@ -308,7 +309,7 @@ self.mc.MOV(RawEbpLoc(ofs), imm(propagate_exception_descr)) # self._call_footer() - rawstart = self.mc.materialize(self.cpu, []) + rawstart = self.materialize(self.mc, [], "propagate_exception") self.propagate_exception_path = rawstart self.mc = None @@ -356,7 +357,7 @@ mc.ADD_ri(esp.value, WORD) mc.JMP(imm(self.propagate_exception_path)) # - rawstart = mc.materialize(self.cpu, []) + rawstart = self.materialize(mc, [], "stack_check") self.stack_check_slowpath = rawstart def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): @@ -457,7 +458,7 @@ mc.LEA_rs(esp.value, 7 * WORD) mc.RET() - rawstart = mc.materialize(self.cpu, []) + rawstart = self.materialize(mc, [], "write_barrier") if for_frame: self.wb_slowpath[4] = rawstart else: @@ -790,13 +791,22 @@ clt.asmmemmgr_blocks = [] return clt.asmmemmgr_blocks + def materialize(self, mc, allblocks, funcname, gcrootmap=None): + from rpython.jit.backend.x86.vtune import rpy_vtune_register + size = mc.get_relative_pos() + rawstart = mc.materialize(self.cpu, allblocks, gcrootmap=gcrootmap) + with rffi.scoped_str2charp("rpyjit." 
+ funcname) as p: + rpy_vtune_register(p, rawstart, size) + return rawstart + def materialize_loop(self, looptoken): self.datablockwrapper.done() # finish using cpu.asmmemmgr self.datablockwrapper = None allblocks = self.get_asmmemmgr_blocks(looptoken) size = self.mc.get_relative_pos() - res = self.mc.materialize(self.cpu, allblocks, - self.cpu.gc_ll_descr.gcrootmap) + res = self.materialize(self.mc, allblocks, + 'loop%d' % (looptoken.number,), + gcrootmap=self.cpu.gc_ll_descr.gcrootmap) if self.cpu.HAS_CODEMAP: self.cpu.codemap.register_codemap( self.codemap_builder.get_final_bytecode(res, size)) @@ -1961,7 +1971,7 @@ # now we return from the complete frame, which starts from # _call_header_with_stack_check(). The _call_footer below does it. self._call_footer() - rawstart = mc.materialize(self.cpu, []) + rawstart = self.materialize(mc, [], "failure_recovery") self.failure_recovery_code[exc + 2 * withfloats] = rawstart self.mc = None diff --git a/rpython/jit/backend/x86/vtune.py b/rpython/jit/backend/x86/vtune.py --- a/rpython/jit/backend/x86/vtune.py +++ b/rpython/jit/backend/x86/vtune.py @@ -14,7 +14,7 @@ separate_module_sources=[""" #include "/opt/intel/vtune_amplifier_xe/sdk/src/ittnotify/jitprofiling.c" -RPY_EXTERN void rpy_vtune_register(char *funcname, Signed addr, Signed length) +RPY_EXTERN void rpy_vtune_register(char *funcname, Signed addr, Signed size) { iJIT_Method_Load_V2 jmethod = {0}; @@ -25,8 +25,8 @@ jmethod.method_id = iJIT_GetNewMethodID(); jmethod.method_name = funcname; jmethod.method_load_address = (void *)addr; - jmethod.method_size = length; - jmethod.module_name = "rpython_jit"; + jmethod.method_size = size; + jmethod.module_name = "rpyjit"; iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED_V2, (void*)&jmethod); From pypy.commits at gmail.com Fri Apr 8 03:56:45 2016 From: pypy.commits at gmail.com (Raemi) Date: Fri, 08 Apr 2016 00:56:45 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: make perf map work for "perf top" Message-ID: 
<5707643d.a8c0c20a.dabbd.326c@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r83582:075e0be3c2d2 Date: 2016-04-08 10:55 +0300 http://bitbucket.org/pypy/pypy/changeset/075e0be3c2d2/ Log: make perf map work for "perf top" diff --git a/rpython/jit/backend/x86/perf_map.py b/rpython/jit/backend/x86/perf_map.py --- a/rpython/jit/backend/x86/perf_map.py +++ b/rpython/jit/backend/x86/perf_map.py @@ -24,6 +24,7 @@ fprintf(pypy_perf_map_file, "%lx %lx %s\n", start_addr, end_addr - start_addr, name); + fflush(pypy_perf_map_file); } """]) diff --git a/rpython/tool/perf-disassemble.sh b/rpython/tool/perf-disassemble.sh new file mode 100755 --- /dev/null +++ b/rpython/tool/perf-disassemble.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# +# Using this script instead of objdump enables perf to disassemble +# and annotate any JIT code (given a symbol file). +# +# To run perf without root: +# kernel.perf_event_paranoid = -1 +# To trace a process without root: +# kernel.yama.ptrace_scope = 0 +# +# Example usage: +# $ dolphin-emu -P /tmp -b -e $game +# $ perf top -p $(pidof dolphin-emu) --objdump ./Tools/perf-disassemble.sh + +flavor=att +raw=r +src= + + +[[ "${@: -1}" != /tmp/perf-*.map ]] && { objdump "$@"; exit; } + +pid=0 +start=0 +stop=0 + +for a in "$@"; do + case "$a" in + /tmp/perf-*.map) + pid="${a#/tmp/perf-}" + pid="${pid%.map}" + shift + ;; + -M | --no-show-raw | -S | -C | -l | -d) + shift + ;; + --start-address=*) + start="${a##--start-address=}" + shift + ;; + --stop-address=*) + stop="${a##--stop-address=}" + shift + ;; + -*) + echo "Unknown parameter '$1'" >&2 + exit 1 + ;; + esac +done +gdb -q -p $pid -ex "set disassembly $flavor" -ex "disas /$raw$src $start,$stop" -ex q -batch From pypy.commits at gmail.com Fri Apr 8 04:26:29 2016 From: pypy.commits at gmail.com (Raemi) Date: Fri, 08 Apr 2016 01:26:29 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: improve perf disassemble script Message-ID: <57076b35.8d571c0a.169ec.ffff9032@mx.google.com> Author: Remi Meier 
Branch: stmgc-c8 Changeset: r83583:27f7b74d3121 Date: 2016-04-08 11:25 +0300 http://bitbucket.org/pypy/pypy/changeset/27f7b74d3121/ Log: improve perf disassemble script diff --git a/rpython/tool/perf-disassemble.sh b/rpython/tool/perf-disassemble.sh --- a/rpython/tool/perf-disassemble.sh +++ b/rpython/tool/perf-disassemble.sh @@ -12,10 +12,12 @@ # $ dolphin-emu -P /tmp -b -e $game # $ perf top -p $(pidof dolphin-emu) --objdump ./Tools/perf-disassemble.sh -flavor=att -raw=r +flavor=intel +#raw=r +raw= src= +echo $@ > ~/bla [[ "${@: -1}" != /tmp/perf-*.map ]] && { objdump "$@"; exit; } From pypy.commits at gmail.com Fri Apr 8 04:35:38 2016 From: pypy.commits at gmail.com (Raemi) Date: Fri, 08 Apr 2016 01:35:38 -0700 (PDT) Subject: [pypy-commit] pypy stmgc-c8: improve perf script some more Message-ID: <57076d5a.14151c0a.9e431.ffff9497@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r83584:17de67ace831 Date: 2016-04-08 11:34 +0300 http://bitbucket.org/pypy/pypy/changeset/17de67ace831/ Log: improve perf script some more diff --git a/rpython/tool/perf-disassemble.sh b/rpython/tool/perf-disassemble.sh --- a/rpython/tool/perf-disassemble.sh +++ b/rpython/tool/perf-disassemble.sh @@ -49,4 +49,4 @@ ;; esac done -gdb -q -p $pid -ex "set disassembly $flavor" -ex "disas /$raw$src $start,$stop" -ex q -batch +gdb -q -p $pid -ex "set disassembly $flavor" -ex "disas /$raw$src $start,$stop" -ex q -batch | sed "s/=>/ /g" From pypy.commits at gmail.com Fri Apr 8 05:15:17 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 08 Apr 2016 02:15:17 -0700 (PDT) Subject: [pypy-commit] pypy vtune: send vtune info only after we patch the generated code's conditional jumps Message-ID: <570776a5.c1621c0a.b5841.ffffa543@mx.google.com> Author: Armin Rigo Branch: vtune Changeset: r83585:594500098380 Date: 2016-04-08 12:14 +0300 http://bitbucket.org/pypy/pypy/changeset/594500098380/ Log: send vtune info only after we patch the generated code's conditional jumps diff --git 
a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -537,6 +537,7 @@ name=loopname, ops_offset=ops_offset) self.fixup_target_tokens(rawstart) + self.materialize_done(rawstart, full_size, "loop%d" % looptoken.number) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -592,6 +593,8 @@ ops_offset=ops_offset) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) + self.materialize_done(rawstart, fullsize, + "loop%d" % original_loop_token.number) self.teardown() # oprofile support if self.cpu.profile_agent is not None: @@ -650,11 +653,14 @@ self.mc.JMP_l(0) self.mc.writeimm32(0) self.mc.force_frame_size(DEFAULT_FRAME_BYTES) + fullsize = self.mc.get_relative_pos() rawstart = self.materialize_loop(looptoken) # update the jump (above) to the real trace self._patch_jump_to(rawstart + offset, asminfo.rawstart) # update the guard to jump right to this custom piece of assembler self.patch_jump_for_descr(faildescr, rawstart) + self.materialize_done(rawstart, fullsize, + "loop%d" % looptoken.number) def _patch_jump_to(self, adr_jump_offset, adr_new_target): assert adr_jump_offset != 0 @@ -791,12 +797,15 @@ clt.asmmemmgr_blocks = [] return clt.asmmemmgr_blocks + def materialize_done(self, rawstart, size, funcname): + from rpython.jit.backend.x86.vtune import rpy_vtune_register + with rffi.scoped_str2charp("rpyjit." + funcname) as p: + rpy_vtune_register(p, rawstart, size) + def materialize(self, mc, allblocks, funcname, gcrootmap=None): - from rpython.jit.backend.x86.vtune import rpy_vtune_register size = mc.get_relative_pos() rawstart = mc.materialize(self.cpu, allblocks, gcrootmap=gcrootmap) - with rffi.scoped_str2charp("rpyjit." 
+ funcname) as p: - rpy_vtune_register(p, rawstart, size) + self.materialize_done(rawstart, size, funcname) return rawstart def materialize_loop(self, looptoken): @@ -804,9 +813,8 @@ self.datablockwrapper = None allblocks = self.get_asmmemmgr_blocks(looptoken) size = self.mc.get_relative_pos() - res = self.materialize(self.mc, allblocks, - 'loop%d' % (looptoken.number,), - gcrootmap=self.cpu.gc_ll_descr.gcrootmap) + res = self.mc.materialize(self.cpu, allblocks, + gcrootmap=self.cpu.gc_ll_descr.gcrootmap) if self.cpu.HAS_CODEMAP: self.cpu.codemap.register_codemap( self.codemap_builder.get_final_bytecode(res, size)) From pypy.commits at gmail.com Fri Apr 8 06:55:25 2016 From: pypy.commits at gmail.com (palecsandru) Date: Fri, 08 Apr 2016 03:55:25 -0700 (PDT) Subject: [pypy-commit] pypy fix-jitlog: (edd, palecsandru): Fixed the shown offset values in the jitlog Message-ID: <57078e1d.88c8c20a.221a2.7aaa@mx.google.com> Author: Alecsandru Patrascu Branch: fix-jitlog Changeset: r83586:68721543f4ba Date: 2016-04-08 13:52 +0300 http://bitbucket.org/pypy/pypy/changeset/68721543f4ba/ Log: (edd, palecsandru): Fixed the shown offset values in the jitlog diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -195,7 +195,8 @@ if ops_offset is None: offset = -1 else: - offset = ops_offset.get(op, -1) + final_op = op.get_box_replacement() + offset = ops_offset.get(final_op, -1) if offset == -1: s_offset = "" else: From pypy.commits at gmail.com Fri Apr 8 08:25:16 2016 From: pypy.commits at gmail.com (vext01) Date: Fri, 08 Apr 2016 05:25:16 -0700 (PDT) Subject: [pypy-commit] pypy fix-jitlog: Test the JIT logger deals with forwarding pointers. 
Message-ID: <5707a32c.06b01c0a.a478c.ffffef3e@mx.google.com> Author: Edd Barrett Branch: fix-jitlog Changeset: r83587:c5673d3cd300 Date: 2016-04-08 13:24 +0100 http://bitbucket.org/pypy/pypy/changeset/c5673d3cd300/ Log: Test the JIT logger deals with forwarding pointers. diff --git a/rpython/jit/metainterp/test/test_logger.py b/rpython/jit/metainterp/test/test_logger.py --- a/rpython/jit/metainterp/test/test_logger.py +++ b/rpython/jit/metainterp/test/test_logger.py @@ -242,3 +242,51 @@ +30: jump(i4) +40: --end of the loop-- """.strip() + + def test_ops_offset_with_forward(self): + inp = ''' + [i0] + i1 = int_add(i0, 4) + i2 = int_mul(i0, 8) + jump(i2) + ''' + loop = pure_parse(inp) + ops = loop.operations + + # again to get new ops with different identities to existing ones + loop2 = pure_parse(inp) + ops2 = loop.operations + + # Suppose a re-write occurs which replaces the operations with these. + # The add 4 became a sub -4. The others are the same, but have a + # different address, thus still require forwarding. 
+ inp2 = ''' + [i0] + i1 = int_sub(i0, -4) + i2 = int_mul(i0, 8) + jump(i2) + ''' + loop2 = pure_parse(inp2) + ops2 = loop2.operations + + # Add forwarding + for i in xrange(3): + ops[i].set_forwarded(ops2[i]) + + # So the offsets are keyed by ops2 instances + ops_offset = { + ops2[0]: 10, + ops2[1]: 20, + ops2[2]: 30, + None: 40 + } + + logger = Logger(self.make_metainterp_sd()) + output = logger.log_loop(loop, ops_offset=ops_offset, name="foo") + + # The logger should have followed the forwarding pointers + lines = output.strip().splitlines() + assert lines[2].startswith("+10") + assert lines[3].startswith("+20") + assert lines[4].startswith("+30") + assert lines[5].startswith("+40") From pypy.commits at gmail.com Fri Apr 8 08:44:47 2016 From: pypy.commits at gmail.com (Diana Popa) Date: Fri, 08 Apr 2016 05:44:47 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: (diana, richard) added tests for jitlog to encode debug_merge_point and also the implementation Message-ID: <5707a7bf.a12dc20a.361e2.7d6f@mx.google.com> Author: Diana Popa Branch: new-jit-log Changeset: r83588:4945fd3dc5ba Date: 2016-04-08 15:43 +0300 http://bitbucket.org/pypy/pypy/changeset/4945fd3dc5ba/ Log: (diana, richard) added tests for jitlog to encode debug_merge_point and also the implementation diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -1,5 +1,6 @@ from rpython.rlib.rvmprof.rvmprof import cintf from rpython.jit.metainterp import resoperation as resoperations +from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp.history import ConstInt, ConstFloat from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, llmemory, rffi @@ -30,6 +31,7 @@ MARK_JIT_ENTRY_COUNTER = 0x22 MARK_JITLOG_HEADER = 0x23 +MARK_JITLOG_DEBUG_MERGE_POINT = 0x24 IS_32_BIT = sys.maxint == 2**31-1 @@ -179,12 +181,26 @@ le_addr2 = 
encode_le_addr(absaddr + rel) log._write_marked(MARK_ASM_ADDR, le_addr1 + le_addr2) for i,op in enumerate(ops): + if rop.DEBUG_MERGE_POINT == op.getopnum(): + self.encode_debug_info(op) + continue mark, line = self.encode_op(op) log._write_marked(mark, line) self.write_core_dump(ops, i, op, ops_offset) self.memo = {} + def encode_debug_info(self, op): + log = self.logger + jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] + file_name, bytecode, line_number = jd_sd.warmstate.get_location_str(op.getarg(2)) + line = [] + line.append(encode_str(file_name)) + line.append(encode_str(bytecode)) + line.append(encode_str(line_number)) + log._write_marked(MARK_JITLOG_DEBUG_MERGE_POINT, ''.join(line)) + + def encode_op(self, op): """ an operation is written as follows: \ diff --git a/rpython/jit/metainterp/test/test_jitlog.py b/rpython/jit/metainterp/test/test_jitlog.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/test/test_jitlog.py @@ -0,0 +1,44 @@ +from rpython.jit.tool.oparser import pure_parse +from rpython.jit.metainterp import jitlog +from rpython.jit.metainterp.optimizeopt.util import equaloplists +from rpython.jit.metainterp.resoperation import ResOperation, rop +from rpython.jit.backend.model import AbstractCPU +from rpython.jit.metainterp.history import ConstInt, ConstPtr +import tempfile + +class TestLogger(object): + + def make_metainterp_sd(self): + class FakeJitDriver(object): + class warmstate(object): + @staticmethod + def get_location_str(ptr): + if ptr.value == 0: + return ['string', '', ''] + + class FakeMetaInterpSd: + cpu = AbstractCPU() + cpu.ts = None + jitdrivers_sd = [FakeJitDriver()] + def get_name_from_address(self, addr): + return 'Name' + return FakeMetaInterpSd() + + def test_debug_merge_point(self, tmpdir): + logger = jitlog.VMProfJitLogger() + file = tmpdir.join('binary_file') + file.ensure() + fd = file.open('wb') + logger.cintf.jitlog_init(fd.fileno()) + log_trace = logger.log_trace(0, 
self.make_metainterp_sd(), None) + op = ResOperation(rop.DEBUG_MERGE_POINT, [ConstInt(0), ConstInt(0), ConstInt(0)]) + log_trace.write([], [op]) + #the next line will close 'fd' + fd.close() + logger.finish() + binary = file.read() + assert binary.startswith(b'\x00\x04\x00\x00\x00loop') + assert binary.endswith(b'\x24\x06\x00\x00\x00string\x00\x00\x00\x00\x00\x00\x00\x00') + + + From pypy.commits at gmail.com Fri Apr 8 09:13:35 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 08 Apr 2016 06:13:35 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: misc stuff Message-ID: <5707ae7f.070d1c0a.338be.044b@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5634:771093ae6324 Date: 2016-04-08 16:13 +0300 http://bitbucket.org/pypy/extradoc/changeset/771093ae6324/ Log: misc stuff diff --git a/planning/misc.txt b/planning/misc.txt new file mode 100644 --- /dev/null +++ b/planning/misc.txt @@ -0,0 +1,13 @@ + + + + +minor/major collections: order the visit to the objects by... address? + +make the resizing of dict/lists more GC-aware + +virtualizables are a mess of loads/stores in the jit traces + +modulo is very bad; "x % (2**n)" should be improved even if x might be +negative. Think also about "x % C" for a general C? 
+ From pypy.commits at gmail.com Fri Apr 8 09:26:57 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 08 Apr 2016 06:26:57 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: typo Message-ID: <5707b1a1.c1621c0a.b5841.0506@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83589:1a912486d7ec Date: 2016-04-08 16:23 +0300 http://bitbucket.org/pypy/pypy/changeset/1a912486d7ec/ Log: typo diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -497,7 +497,7 @@ @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, header=None) @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) - def slot_tp_new(space, type, w_args, w_kwds): + def slot_tp_new(space, w_type, w_args, w_kwds): return space.call(w_type, w_args, w_kwds) api_func = slot_tp_new.api_func else: From pypy.commits at gmail.com Fri Apr 8 09:26:59 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 08 Apr 2016 06:26:59 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: guess a bit why test_tp_new_in_subclass_of_type recurses? Message-ID: <5707b1a3.c9b0c20a.15d05.fffff63b@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83590:ae1a28f2ca49 Date: 2016-04-08 16:25 +0300 http://bitbucket.org/pypy/pypy/changeset/ae1a28f2ca49/ Log: guess a bit why test_tp_new_in_subclass_of_type recurses? diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -266,7 +266,8 @@ @cpython_api([PyObject, PyObject, PyObject], PyObject, header=None) def tp_new_wrapper(space, self, w_args, w_kwds): - tp_new = rffi.cast(PyTypeObjectPtr, self).c_tp_new + self_pytype = rffi.cast(PyTypeObjectPtr, self) + tp_new = self_pytype.c_tp_new # Check that the user doesn't do something silly and unsafe like # object.__new__(dict). 
To do this, we check that the most @@ -277,8 +278,13 @@ w_subtype = args_w[0] w_args = space.newtuple(args_w[1:]) - subtype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_subtype)) try: + subtype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_subtype)) + if subtype == self_pytype: + print 'recursion detected???' + print 'calling tp_new of %s with %s' % ( + rffi.charp2str(self_pytype.c_tp_name), + rffi.charp2str(subtype.c_tp_name)) w_obj = generic_cpy_call(space, tp_new, subtype, w_args, w_kwds) finally: Py_DecRef(space, w_subtype) From pypy.commits at gmail.com Sat Apr 9 10:17:12 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 09 Apr 2016 07:17:12 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: Kill _record_constptrs, no longer used Message-ID: <57090ee8.034cc20a.ef706.59b1@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83591:a18554508f77 Date: 2016-04-09 17:08 +0300 http://bitbucket.org/pypy/pypy/changeset/a18554508f77/ Log: Kill _record_constptrs, no longer used diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -22,38 +22,6 @@ from rpython.memory.gctransform import asmgcroot from rpython.jit.codewriter.effectinfo import EffectInfo -class MovableObjectTracker(object): - - ptr_array_type = lltype.GcArray(llmemory.GCREF) - ptr_array_gcref = lltype.nullptr(llmemory.GCREF.TO) - - def __init__(self, cpu, const_pointers): - size = len(const_pointers) - # check that there are any moving object (i.e. chaning pointers). - # Otherwise there is no reason for an instance of this class. 
- assert size > 0 - # - # prepare GC array to hold the pointers that may change - self.ptr_array = lltype.malloc(MovableObjectTracker.ptr_array_type, size) - self.ptr_array_descr = cpu.arraydescrof(MovableObjectTracker.ptr_array_type) - self.ptr_array_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, self.ptr_array) - # use always the same ConstPtr to access the array - # (easer to read JIT trace) - self.const_ptr_gcref_array = ConstPtr(self.ptr_array_gcref) - # - # assign each pointer an index and put the pointer into the GC array. - # as pointers and addresses are not a good key to use before translation - # ConstPtrs are used as the key for the dict. - self._indexes = {} - for index in range(size): - ptr = const_pointers[index] - self._indexes[ptr] = index - self.ptr_array[index] = ptr.value - - def get_array_index(self, const_ptr): - index = self._indexes[const_ptr] - assert const_ptr.value == self.ptr_array[index] - return index # ____________________________________________________________ class GcLLDescription(GcCache): @@ -129,101 +97,11 @@ def gc_malloc_unicode(self, num_elem): return self._bh_malloc_array(num_elem, self.unicode_descr) - def _record_constptrs(self, op, gcrefs_output_list, - ops_with_movable_const_ptr, - changeable_const_pointers): - l = None - for i in range(op.numargs()): - v = op.getarg(i) - if isinstance(v, ConstPtr) and bool(v.value): - p = v.value - if not rgc.can_move(p): - gcrefs_output_list.append(p) - else: - if l is None: - l = [i] - else: - l.append(i) - if v not in changeable_const_pointers: - changeable_const_pointers.append(v) - # - if op.is_guard() or op.getopnum() == rop.FINISH: - llref = cast_instance_to_gcref(op.getdescr()) - assert rgc._make_sure_does_not_move(llref) - gcrefs_output_list.append(llref) - # - if l: - ops_with_movable_const_ptr[op] = l - - def _rewrite_changeable_constptrs(self, op, ops_with_movable_const_ptr, moving_obj_tracker): - newops = [] - for arg_i in ops_with_movable_const_ptr[op]: - v = 
op.getarg(arg_i) - # assert to make sure we got what we expected - assert isinstance(v, ConstPtr) - array_index = moving_obj_tracker.get_array_index(v) - - size, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) - array_index = array_index * size + offset - args = [moving_obj_tracker.const_ptr_gcref_array, - ConstInt(array_index), - ConstInt(size)] - load_op = ResOperation(rop.GC_LOAD_R, args) - newops.append(load_op) - op.setarg(arg_i, load_op) - # - newops.append(op) - return newops - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): rewriter = GcRewriterAssembler(self, cpu) newops = rewriter.rewrite(operations, gcrefs_output_list) return newops - XXX # kill the rest - - # the key is an operation that contains a ConstPtr as an argument and - # this ConstPtrs pointer might change as it points to an object that - # can't be made non-moving (e.g. the object is pinned). - ops_with_movable_const_ptr = {} - # - # a list of such not really constant ConstPtrs. - changeable_const_pointers = [] - for op in newops: - # record all GCREFs, because the GC (or Boehm) cannot see them and - # keep them alive if they end up as constants in the assembler. - # If such a GCREF can change and we can't make the object it points - # to non-movable, we have to handle it seperatly. Such GCREF's are - # returned as ConstPtrs in 'changeable_const_pointers' and the - # affected operation is returned in 'op_with_movable_const_ptr'. - # For this special case see 'rewrite_changeable_constptrs'. 
- self._record_constptrs(op, gcrefs_output_list, - ops_with_movable_const_ptr, changeable_const_pointers) - # - # handle pointers that are not guaranteed to stay the same - if len(ops_with_movable_const_ptr) > 0: - moving_obj_tracker = MovableObjectTracker(cpu, changeable_const_pointers) - # - if not we_are_translated(): - # used for testing - self.last_moving_obj_tracker = moving_obj_tracker - # make sure the array containing the pointers is not collected by - # the GC (or Boehm) - gcrefs_output_list.append(moving_obj_tracker.ptr_array_gcref) - rgc._make_sure_does_not_move(moving_obj_tracker.ptr_array_gcref) - - ops = newops - newops = [] - for op in ops: - if op in ops_with_movable_const_ptr: - rewritten_ops = self._rewrite_changeable_constptrs(op, - ops_with_movable_const_ptr, moving_obj_tracker) - newops.extend(rewritten_ops) - else: - newops.append(op) - # - return newops - @specialize.memo() def getframedescrs(self, cpu): descrs = JitFrameDescrs() From pypy.commits at gmail.com Sat Apr 9 10:17:17 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 09 Apr 2016 07:17:17 -0700 (PDT) Subject: [pypy-commit] pypy default: hg merge jit-constptr-2 Message-ID: <57090eed.90051c0a.f7fb1.ffffaaea@mx.google.com> Author: Armin Rigo Branch: Changeset: r83593:a07ab092b64a Date: 2016-04-09 17:14 +0300 http://bitbucket.org/pypy/pypy/changeset/a07ab092b64a/ Log: hg merge jit-constptr-2 Remove the forced minor collection that occurs when rewriting the assembler at the start of the JIT backend. This is done by emitting the ConstPtrs in a separate table, and loading from the table. Gives improved warm-up time and memory usage. Also removes annoying special-purpose code for pinned pointers. 
diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -14,7 +14,7 @@ CoreRegisterManager, check_imm_arg, VFPRegisterManager, operations as regalloc_operations) from rpython.jit.backend.llsupport import jitframe, rewrite -from rpython.jit.backend.llsupport.assembler import DEBUG_COUNTER, debug_bridge, BaseAssembler +from rpython.jit.backend.llsupport.assembler import DEBUG_COUNTER, BaseAssembler from rpython.jit.backend.llsupport.regalloc import get_scale, valid_addressing_size from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from rpython.jit.backend.model import CompiledLoopToken @@ -481,8 +481,9 @@ def generate_quick_failure(self, guardtok): startpos = self.mc.currpos() - fail_descr, target = self.store_info_on_descr(startpos, guardtok) - self.regalloc_push(imm(fail_descr)) + faildescrindex, target = self.store_info_on_descr(startpos, guardtok) + self.load_from_gc_table(r.ip.value, faildescrindex) + self.regalloc_push(r.ip) self.push_gcmap(self.mc, gcmap=guardtok.gcmap, push=True) self.mc.BL(target) return startpos @@ -556,7 +557,7 @@ debug_stop('jit-backend-ops') def _call_header(self): - assert self.mc.currpos() == 0 + # there is the gc table before this point self.gen_func_prolog() def _call_header_with_stack_check(self): @@ -596,20 +597,22 @@ frame_info = self.datablockwrapper.malloc_aligned( jitframe.JITFRAMEINFO_SIZE, alignment=WORD) clt.frame_info = rffi.cast(jitframe.JITFRAMEINFOPTR, frame_info) - clt.allgcrefs = [] clt.frame_info.clear() # for now if log: operations = self._inject_debugging_code(looptoken, operations, 'e', looptoken.number) + regalloc = Regalloc(assembler=self) + allgcrefs = [] + operations = regalloc.prepare_loop(inputargs, operations, looptoken, + allgcrefs) + self.reserve_gcref_table(allgcrefs) + functionpos = self.mc.get_relative_pos() + self._call_header_with_stack_check() 
self._check_frame_depth_debug(self.mc) - regalloc = Regalloc(assembler=self) - operations = regalloc.prepare_loop(inputargs, operations, looptoken, - clt.allgcrefs) - loop_head = self.mc.get_relative_pos() looptoken._ll_loop_code = loop_head # @@ -620,9 +623,11 @@ self.write_pending_failure_recoveries() + full_size = self.mc.get_relative_pos() rawstart = self.materialize_loop(looptoken) - looptoken._function_addr = looptoken._ll_function_addr = rawstart + looptoken._ll_function_addr = rawstart + functionpos + self.patch_gcref_table(looptoken, rawstart) self.process_pending_guards(rawstart) self.fixup_target_tokens(rawstart) @@ -641,7 +646,13 @@ looptoken.number, loopname, r_uint(rawstart + loop_head), r_uint(rawstart + size_excluding_failure_stuff), - r_uint(rawstart))) + r_uint(rawstart + functionpos))) + debug_print(" gc table: 0x%x" % r_uint(rawstart)) + debug_print(" function: 0x%x" % r_uint(rawstart + functionpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + loop_head)) + debug_print(" failures: 0x%x" % r_uint(rawstart + + size_excluding_failure_stuff)) + debug_print(" end: 0x%x" % r_uint(rawstart + full_size)) debug_stop("jit-backend-addr") return AsmInfo(ops_offset, rawstart + loop_head, @@ -678,27 +689,43 @@ arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = Regalloc(assembler=self) - startpos = self.mc.get_relative_pos() + allgcrefs = [] operations = regalloc.prepare_bridge(inputargs, arglocs, operations, - self.current_clt.allgcrefs, + allgcrefs, self.current_clt.frame_info) + self.reserve_gcref_table(allgcrefs) + startpos = self.mc.get_relative_pos() self._check_frame_depth(self.mc, regalloc.get_gcmap()) + bridgestartpos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() self.write_pending_failure_recoveries() + fullsize = self.mc.get_relative_pos() rawstart = self.materialize_loop(original_loop_token) + 
self.patch_gcref_table(original_loop_token, rawstart) self.process_pending_guards(rawstart) + debug_start("jit-backend-addr") + debug_print("bridge out of Guard 0x%x has address 0x%x to 0x%x" % + (r_uint(descr_number), r_uint(rawstart + startpos), + r_uint(rawstart + codeendpos))) + debug_print(" gc table: 0x%x" % r_uint(rawstart)) + debug_print(" jump target: 0x%x" % r_uint(rawstart + startpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + bridgestartpos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + codeendpos)) + debug_print(" end: 0x%x" % r_uint(rawstart + fullsize)) + debug_stop("jit-backend-addr") + # patch the jump from original guard self.patch_trace(faildescr, original_loop_token, - rawstart, regalloc) + rawstart + startpos, regalloc) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) @@ -716,9 +743,53 @@ ops_offset=ops_offset) self.teardown() - debug_bridge(descr_number, rawstart, codeendpos) + return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) - return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) + def reserve_gcref_table(self, allgcrefs): + gcref_table_size = len(allgcrefs) * WORD + # align to a multiple of 16 and reserve space at the beginning + # of the machine code for the gc table. This lets us write + # machine code with relative addressing (see load_from_gc_table()) + gcref_table_size = (gcref_table_size + 15) & ~15 + mc = self.mc + assert mc.get_relative_pos() == 0 + for i in range(gcref_table_size): + mc.writechar('\x00') + self.setup_gcrefs_list(allgcrefs) + + def patch_gcref_table(self, looptoken, rawstart): + # the gc table is at the start of the machine code. 
Fill it now + tracer = self.cpu.gc_ll_descr.make_gcref_tracer(rawstart, + self._allgcrefs) + gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) + gcreftracers.append(tracer) # keepalive + self.teardown_gcrefs_list() + + def load_from_gc_table(self, regnum, index): + """emits either: + LDR Rt, [PC, #offset] if -4095 <= offset + or: + gen_load_int(Rt, offset) + LDR Rt, [PC, Rt] for larger offsets + """ + mc = self.mc + address_in_buffer = index * WORD # at the start of the buffer + offset = address_in_buffer - (mc.get_relative_pos() + 8) # negative + if offset >= -4095: + mc.LDR_ri(regnum, r.pc.value, offset) + else: + # The offset we're loading is negative: right now, + # gen_load_int() will always use exactly + # get_max_size_of_gen_load_int() instructions. No point + # in optimizing in case we get less. Just in case though, + # we check and pad with nops. + extra_bytes = mc.get_max_size_of_gen_load_int() * 2 + offset -= extra_bytes + start = mc.get_relative_pos() + mc.gen_load_int(regnum, offset) + while mc.get_relative_pos() != start + extra_bytes: + mc.NOP() + mc.LDR_rr(regnum, r.pc.value, regnum) def new_stack_loc(self, i, tp): base_ofs = self.cpu.get_baseofs_of_frame_field() @@ -929,6 +1000,12 @@ clt.asmmemmgr_blocks = [] return clt.asmmemmgr_blocks + def get_asmmemmgr_gcreftracers(self, looptoken): + clt = looptoken.compiled_loop_token + if clt.asmmemmgr_gcreftracers is None: + clt.asmmemmgr_gcreftracers = [] + return clt.asmmemmgr_gcreftracers + def _walk_operations(self, inputargs, operations, regalloc): fcond = c.AL self._regalloc = regalloc diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -35,9 +35,9 @@ class ArmGuardToken(GuardToken): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - offset, guard_opnum, frame_depth, fcond=c.AL): + offset, guard_opnum, frame_depth, faildescrindex, fcond=c.AL): 
GuardToken.__init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - guard_opnum, frame_depth) + guard_opnum, frame_depth, faildescrindex) self.fcond = fcond self.offset = offset @@ -178,6 +178,7 @@ assert isinstance(descr, AbstractFailDescr) gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + faildescrindex = self.get_gcref_from_faildescr(descr) token = ArmGuardToken(self.cpu, gcmap, descr, failargs=op.getfailargs(), @@ -185,6 +186,7 @@ offset=offset, guard_opnum=op.getopnum(), frame_depth=frame_depth, + faildescrindex=faildescrindex, fcond=fcond) return token @@ -398,14 +400,13 @@ def emit_op_finish(self, op, arglocs, regalloc, fcond): base_ofs = self.cpu.get_baseofs_of_frame_field() - if len(arglocs) == 2: - [return_val, fail_descr_loc] = arglocs + if len(arglocs) > 0: + [return_val] = arglocs self.store_reg(self.mc, return_val, r.fp, base_ofs) - else: - [fail_descr_loc] = arglocs ofs = self.cpu.get_ofs_of_frame_field('jf_descr') - self.mc.gen_load_int(r.ip.value, fail_descr_loc.value) + faildescrindex = self.get_gcref_from_faildescr(op.getdescr()) + self.load_from_gc_table(r.ip.value, faildescrindex) # XXX self.mov(fail_descr_loc, RawStackLoc(ofs)) self.store_reg(self.mc, r.ip, r.fp, ofs, helper=r.lr) if op.numargs() > 0 and op.getarg(0).type == REF: @@ -1035,9 +1036,9 @@ assert (guard_op.getopnum() == rop.GUARD_NOT_FORCED or guard_op.getopnum() == rop.GUARD_NOT_FORCED_2) faildescr = guard_op.getdescr() + faildescrindex = self.get_gcref_from_faildescr(faildescr) ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - value = rffi.cast(lltype.Signed, cast_instance_to_gcref(faildescr)) - self.mc.gen_load_int(r.ip.value, value) + self.load_from_gc_table(r.ip.value, faildescrindex) self.store_reg(self.mc, r.ip, r.fp, ofs) def _find_nearby_operation(self, delta): @@ -1250,3 +1251,9 @@ self._load_from_mem(res_loc, res_loc, ofs_loc, imm(scale), signed, fcond) return fcond + + def emit_op_load_from_gc_table(self, op, arglocs, regalloc, fcond): + 
res_loc, = arglocs + index = op.getarg(0).getint() + self.load_from_gc_table(res_loc.value, index) + return fcond diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1,5 +1,4 @@ from rpython.rtyper.annlowlevel import cast_instance_to_gcref -from rpython.rlib import rgc from rpython.rlib.debug import debug_print, debug_start, debug_stop from rpython.jit.backend.llsupport.regalloc import FrameManager, \ RegisterManager, TempVar, compute_vars_longevity, BaseRegalloc, \ @@ -627,16 +626,11 @@ def prepare_op_finish(self, op, fcond): # the frame is in fp, but we have to point where in the frame is # the potential argument to FINISH - descr = op.getdescr() - fail_descr = cast_instance_to_gcref(descr) - # we know it does not move, but well - rgc._make_sure_does_not_move(fail_descr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) - locs = [loc, imm(fail_descr)] + locs = [loc] else: - locs = [imm(fail_descr)] + locs = [] return locs def load_condition_into_cc(self, box): @@ -892,6 +886,10 @@ prepare_op_same_as_r = _prepare_op_same_as prepare_op_same_as_f = _prepare_op_same_as + def prepare_op_load_from_gc_table(self, op, fcond): + resloc = self.force_allocate_reg(op) + return [resloc] + def prepare_op_call_malloc_nursery(self, op, fcond): size_box = op.getarg(0) assert isinstance(size_box, ConstInt) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -23,10 +23,11 @@ class GuardToken(object): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - guard_opnum, frame_depth): + guard_opnum, frame_depth, faildescrindex): assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr + self.faildescrindex 
= faildescrindex self.failargs = failargs self.fail_locs = fail_locs self.gcmap = self.compute_gcmap(gcmap, failargs, @@ -144,6 +145,22 @@ self.codemap_builder = CodemapBuilder() self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) + def setup_gcrefs_list(self, allgcrefs): + self._allgcrefs = allgcrefs + self._allgcrefs_faildescr_next = 0 + + def teardown_gcrefs_list(self): + self._allgcrefs = None + + def get_gcref_from_faildescr(self, descr): + """This assumes that it is called in order for all faildescrs.""" + search = cast_instance_to_gcref(descr) + while not _safe_eq( + self._allgcrefs[self._allgcrefs_faildescr_next], search): + self._allgcrefs_faildescr_next += 1 + assert self._allgcrefs_faildescr_next < len(self._allgcrefs) + return self._allgcrefs_faildescr_next + def set_debug(self, v): r = self._debug self._debug = v @@ -186,8 +203,7 @@ break exc = guardtok.must_save_exception() target = self.failure_recovery_code[exc + 2 * withfloats] - fail_descr = cast_instance_to_gcref(guardtok.faildescr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) + faildescrindex = guardtok.faildescrindex base_ofs = self.cpu.get_baseofs_of_frame_field() # # in practice, about 2/3rd of 'positions' lists that we build are @@ -229,7 +245,7 @@ self._previous_rd_locs = positions # write down the positions of locs guardtok.faildescr.rd_locs = positions - return fail_descr, target + return faildescrindex, target def enter_portal_frame(self, op): if self.cpu.HAS_CODEMAP: @@ -288,7 +304,7 @@ gcref = cast_instance_to_gcref(value) if gcref: - rgc._make_sure_does_not_move(gcref) + rgc._make_sure_does_not_move(gcref) # but should be prebuilt value = rffi.cast(lltype.Signed, gcref) je_location = self._call_assembler_check_descr(value, tmploc) # @@ -451,3 +467,8 @@ r_uint(rawstart + codeendpos))) debug_stop("jit-backend-addr") +def _safe_eq(x, y): + try: + return x == y + except AttributeError: # minor mess + return False diff --git a/rpython/jit/backend/llsupport/gc.py 
b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -22,38 +22,6 @@ from rpython.memory.gctransform import asmgcroot from rpython.jit.codewriter.effectinfo import EffectInfo -class MovableObjectTracker(object): - - ptr_array_type = lltype.GcArray(llmemory.GCREF) - ptr_array_gcref = lltype.nullptr(llmemory.GCREF.TO) - - def __init__(self, cpu, const_pointers): - size = len(const_pointers) - # check that there are any moving object (i.e. chaning pointers). - # Otherwise there is no reason for an instance of this class. - assert size > 0 - # - # prepare GC array to hold the pointers that may change - self.ptr_array = lltype.malloc(MovableObjectTracker.ptr_array_type, size) - self.ptr_array_descr = cpu.arraydescrof(MovableObjectTracker.ptr_array_type) - self.ptr_array_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, self.ptr_array) - # use always the same ConstPtr to access the array - # (easer to read JIT trace) - self.const_ptr_gcref_array = ConstPtr(self.ptr_array_gcref) - # - # assign each pointer an index and put the pointer into the GC array. - # as pointers and addresses are not a good key to use before translation - # ConstPtrs are used as the key for the dict. 
- self._indexes = {} - for index in range(size): - ptr = const_pointers[index] - self._indexes[ptr] = index - self.ptr_array[index] = ptr.value - - def get_array_index(self, const_ptr): - index = self._indexes[const_ptr] - assert const_ptr.value == self.ptr_array[index] - return index # ____________________________________________________________ class GcLLDescription(GcCache): @@ -129,96 +97,9 @@ def gc_malloc_unicode(self, num_elem): return self._bh_malloc_array(num_elem, self.unicode_descr) - def _record_constptrs(self, op, gcrefs_output_list, - ops_with_movable_const_ptr, - changeable_const_pointers): - l = None - for i in range(op.numargs()): - v = op.getarg(i) - if isinstance(v, ConstPtr) and bool(v.value): - p = v.value - if rgc._make_sure_does_not_move(p): - gcrefs_output_list.append(p) - else: - if l is None: - l = [i] - else: - l.append(i) - if v not in changeable_const_pointers: - changeable_const_pointers.append(v) - # - if op.is_guard() or op.getopnum() == rop.FINISH: - llref = cast_instance_to_gcref(op.getdescr()) - assert rgc._make_sure_does_not_move(llref) - gcrefs_output_list.append(llref) - # - if l: - ops_with_movable_const_ptr[op] = l - - def _rewrite_changeable_constptrs(self, op, ops_with_movable_const_ptr, moving_obj_tracker): - newops = [] - for arg_i in ops_with_movable_const_ptr[op]: - v = op.getarg(arg_i) - # assert to make sure we got what we expected - assert isinstance(v, ConstPtr) - array_index = moving_obj_tracker.get_array_index(v) - - size, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) - array_index = array_index * size + offset - args = [moving_obj_tracker.const_ptr_gcref_array, - ConstInt(array_index), - ConstInt(size)] - load_op = ResOperation(rop.GC_LOAD_R, args) - newops.append(load_op) - op.setarg(arg_i, load_op) - # - newops.append(op) - return newops - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): rewriter = GcRewriterAssembler(self, cpu) - newops = rewriter.rewrite(operations) - - # 
the key is an operation that contains a ConstPtr as an argument and - # this ConstPtrs pointer might change as it points to an object that - # can't be made non-moving (e.g. the object is pinned). - ops_with_movable_const_ptr = {} - # - # a list of such not really constant ConstPtrs. - changeable_const_pointers = [] - for op in newops: - # record all GCREFs, because the GC (or Boehm) cannot see them and - # keep them alive if they end up as constants in the assembler. - # If such a GCREF can change and we can't make the object it points - # to non-movable, we have to handle it seperatly. Such GCREF's are - # returned as ConstPtrs in 'changeable_const_pointers' and the - # affected operation is returned in 'op_with_movable_const_ptr'. - # For this special case see 'rewrite_changeable_constptrs'. - self._record_constptrs(op, gcrefs_output_list, - ops_with_movable_const_ptr, changeable_const_pointers) - # - # handle pointers that are not guaranteed to stay the same - if len(ops_with_movable_const_ptr) > 0: - moving_obj_tracker = MovableObjectTracker(cpu, changeable_const_pointers) - # - if not we_are_translated(): - # used for testing - self.last_moving_obj_tracker = moving_obj_tracker - # make sure the array containing the pointers is not collected by - # the GC (or Boehm) - gcrefs_output_list.append(moving_obj_tracker.ptr_array_gcref) - rgc._make_sure_does_not_move(moving_obj_tracker.ptr_array_gcref) - - ops = newops - newops = [] - for op in ops: - if op in ops_with_movable_const_ptr: - rewritten_ops = self._rewrite_changeable_constptrs(op, - ops_with_movable_const_ptr, moving_obj_tracker) - newops.extend(rewritten_ops) - else: - newops.append(op) - # + newops = rewriter.rewrite(operations, gcrefs_output_list) return newops @specialize.memo() @@ -244,6 +125,14 @@ """ return jitframe.JITFRAME.allocate(frame_info) + def make_gcref_tracer(self, array_base_addr, gcrefs): + # for tests, or for Boehm. 
Overridden for framework GCs + from rpython.jit.backend.llsupport import gcreftracer + return gcreftracer.make_boehm_tracer(array_base_addr, gcrefs) + + def clear_gcref_tracer(self, tracer): + pass # nothing needed unless overridden + class JitFrameDescrs: def _freeze_(self): return True @@ -752,6 +641,13 @@ p = rffi.cast(rffi.CCHARP, p) return (ord(p[0]) & IS_OBJECT_FLAG) != 0 + def make_gcref_tracer(self, array_base_addr, gcrefs): + from rpython.jit.backend.llsupport import gcreftracer + return gcreftracer.make_framework_tracer(array_base_addr, gcrefs) + + def clear_gcref_tracer(self, tracer): + tracer.array_length = 0 + # ____________________________________________________________ def get_ll_description(gcdescr, translator=None, rtyper=None): diff --git a/rpython/jit/backend/llsupport/gcreftracer.py b/rpython/jit/backend/llsupport/gcreftracer.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/gcreftracer.py @@ -0,0 +1,49 @@ +from rpython.rlib import rgc +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.jit.backend.llsupport.symbolic import WORD + + +GCREFTRACER = lltype.GcStruct( + 'GCREFTRACER', + ('array_base_addr', lltype.Signed), + ('array_length', lltype.Signed), + rtti=True) + +def gcrefs_trace(gc, obj_addr, callback, arg): + obj = llmemory.cast_adr_to_ptr(obj_addr, lltype.Ptr(GCREFTRACER)) + i = 0 + length = obj.array_length + addr = obj.array_base_addr + while i < length: + p = rffi.cast(llmemory.Address, addr + i * WORD) + gc._trace_callback(callback, arg, p) + i += 1 +lambda_gcrefs_trace = lambda: gcrefs_trace + +def make_framework_tracer(array_base_addr, gcrefs): + # careful about the order here: the allocation of the GCREFTRACER + # can trigger a GC. So we must write the gcrefs into the raw + # array only afterwards... 
+ rgc.register_custom_trace_hook(GCREFTRACER, lambda_gcrefs_trace) + length = len(gcrefs) + tr = lltype.malloc(GCREFTRACER) + # --no GC from here-- + tr.array_base_addr = array_base_addr + tr.array_length = length + i = 0 + while i < length: + p = rffi.cast(rffi.SIGNEDP, array_base_addr + i * WORD) + p[0] = rffi.cast(lltype.Signed, gcrefs[i]) + i += 1 + llop.gc_writebarrier(lltype.Void, tr) + # --no GC until here-- + return tr + +def make_boehm_tracer(array_base_addr, gcrefs): + # copy the addresses, but return 'gcrefs' as the object that must be + # kept alive + for i in range(len(gcrefs)): + p = rffi.cast(rffi.SIGNEDP, array_base_addr + i * WORD) + p[0] = rffi.cast(lltype.Signed, gcrefs[i]) + return gcrefs diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -246,6 +246,13 @@ def free_loop_and_bridges(self, compiled_loop_token): AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) + # turn off all gcreftracers + tracers = compiled_loop_token.asmmemmgr_gcreftracers + if tracers is not None: + compiled_loop_token.asmmemmgr_gcreftracers = None + for tracer in tracers: + self.gc_ll_descr.clear_gcref_tracer(tracer) + # then free all blocks of code and raw data blocks = compiled_loop_token.asmmemmgr_blocks if blocks is not None: compiled_loop_token.asmmemmgr_blocks = None diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -1,10 +1,12 @@ from rpython.rlib import rgc -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, r_dict from rpython.rlib.rarithmetic import ovfcheck, highest_bit from rpython.rtyper.lltypesystem import llmemory, lltype, rstr +from rpython.rtyper.annlowlevel import cast_instance_to_gcref from 
rpython.jit.metainterp import history from rpython.jit.metainterp.history import ConstInt, ConstPtr from rpython.jit.metainterp.resoperation import ResOperation, rop, OpHelpers +from rpython.jit.metainterp.typesystem import rd_eq, rd_hash from rpython.jit.codewriter import heaptracker from rpython.jit.backend.llsupport.symbolic import (WORD, get_array_token) @@ -94,21 +96,28 @@ op = self.get_box_replacement(op) orig_op = op replaced = False + opnum = op.getopnum() + keep = (opnum == rop.JIT_DEBUG) for i in range(op.numargs()): orig_arg = op.getarg(i) arg = self.get_box_replacement(orig_arg) + if isinstance(arg, ConstPtr) and bool(arg.value) and not keep: + arg = self.remove_constptr(arg) if orig_arg is not arg: if not replaced: - op = op.copy_and_change(op.getopnum()) + op = op.copy_and_change(opnum) orig_op.set_forwarded(op) replaced = True op.setarg(i, arg) - if rop.is_guard(op.opnum): + if rop.is_guard(opnum): if not replaced: - op = op.copy_and_change(op.getopnum()) + op = op.copy_and_change(opnum) orig_op.set_forwarded(op) op.setfailargs([self.get_box_replacement(a, True) for a in op.getfailargs()]) + if rop.is_guard(opnum) or opnum == rop.FINISH: + llref = cast_instance_to_gcref(op.getdescr()) + self.gcrefs_output_list.append(llref) self._newops.append(op) def replace_op_with(self, op, newop): @@ -304,13 +313,16 @@ return False - def rewrite(self, operations): + def rewrite(self, operations, gcrefs_output_list): # we can only remember one malloc since the next malloc can possibly # collect; but we can try to collapse several known-size mallocs into # one, both for performance and to reduce the number of write # barriers. We do this on each "basic block" of operations, which in # this case means between CALLs or unknown-size mallocs. 
# + self.gcrefs_output_list = gcrefs_output_list + self.gcrefs_map = None + self.gcrefs_recently_loaded = None operations = self.remove_bridge_exception(operations) self._changed_op = None for i in range(len(operations)): @@ -333,8 +345,7 @@ elif rop.can_malloc(op.opnum): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: - self.emitting_an_operation_that_can_collect() - self._known_lengths.clear() + self.emit_label() # ---------- write barriers ---------- if self.gc_ll_descr.write_barrier_descr is not None: if op.getopnum() == rop.SETFIELD_GC: @@ -940,3 +951,37 @@ operations[start+2].getopnum() == rop.RESTORE_EXCEPTION): return operations[:start] + operations[start+3:] return operations + + def emit_label(self): + self.emitting_an_operation_that_can_collect() + self._known_lengths.clear() + self.gcrefs_recently_loaded = None + + def _gcref_index(self, gcref): + if self.gcrefs_map is None: + self.gcrefs_map = r_dict(rd_eq, rd_hash) + try: + return self.gcrefs_map[gcref] + except KeyError: + pass + index = len(self.gcrefs_output_list) + self.gcrefs_map[gcref] = index + self.gcrefs_output_list.append(gcref) + return index + + def remove_constptr(self, c): + """Remove all ConstPtrs, and replace them with load_from_gc_table. + """ + # Note: currently, gcrefs_recently_loaded is only cleared in + # LABELs. We'd like something better, like "don't spill it", + # but that's the wrong level... 
+ index = self._gcref_index(c.value) + if self.gcrefs_recently_loaded is None: + self.gcrefs_recently_loaded = {} + try: + load_op = self.gcrefs_recently_loaded[index] + except KeyError: + load_op = ResOperation(rop.LOAD_FROM_GC_TABLE, [ConstInt(index)]) + self._newops.append(load_op) + self.gcrefs_recently_loaded[index] = load_op + return load_op diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -196,31 +196,6 @@ assert is_valid_int(wbdescr.jit_wb_if_flag_byteofs) assert is_valid_int(wbdescr.jit_wb_if_flag_singlebyte) - def test_record_constptrs(self): - class MyFakeCPU(object): - def cast_adr_to_int(self, adr): - assert adr == "some fake address" - return 43 - class MyFakeGCRefList(object): - def get_address_of_gcref(self, s_gcref1): - assert s_gcref1 == s_gcref - return "some fake address" - S = lltype.GcStruct('S') - s = lltype.malloc(S) - s_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) - v_random_box = InputArgRef() - operations = [ - ResOperation(rop.PTR_EQ, [v_random_box, ConstPtr(s_gcref)]), - ] - gc_ll_descr = self.gc_ll_descr - gc_ll_descr.gcrefs = MyFakeGCRefList() - gcrefs = [] - operations = get_deep_immutable_oplist(operations) - operations2 = gc_ll_descr.rewrite_assembler(MyFakeCPU(), operations, - gcrefs) - assert operations2 == operations - assert gcrefs == [s_gcref] - class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/rpython/jit/backend/llsupport/test/test_gcreftracer.py b/rpython/jit/backend/llsupport/test/test_gcreftracer.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/test/test_gcreftracer.py @@ -0,0 +1,53 @@ +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.jit.backend.llsupport.gcreftracer import GCREFTRACER, gcrefs_trace +from rpython.jit.backend.llsupport.gcreftracer import make_framework_tracer 
+from rpython.jit.backend.llsupport.gcreftracer import make_boehm_tracer + + +class FakeGC: + def __init__(self): + self.called = [] + def _trace_callback(self, callback, arg, addr): + assert callback == "callback" + assert arg == "arg" + assert lltype.typeOf(addr) == llmemory.Address + self.called.append(addr) + + +def test_gcreftracer(): + a = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') + a[0] = 123 + a[1] = 456 + a[2] = 789 + tr = lltype.malloc(GCREFTRACER) + tr.array_base_addr = base = rffi.cast(lltype.Signed, a) + tr.array_length = 3 + gc = FakeGC() + gcrefs_trace(gc, llmemory.cast_ptr_to_adr(tr), "callback", "arg") + assert len(gc.called) == 3 + WORD = rffi.sizeof(lltype.Signed) + for i in range(3): + assert gc.called[i] == rffi.cast(llmemory.Address, base + i * WORD) + lltype.free(a, flavor='raw') + +def test_make_framework_tracer(): + a = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') + base = rffi.cast(lltype.Signed, a) + tr = make_framework_tracer(base, [123, 456, 789]) + assert a[0] == 123 + assert a[1] == 456 + assert a[2] == 789 + assert tr.array_base_addr == base + assert tr.array_length == 3 + lltype.free(a, flavor='raw') + +def test_make_boehm_tracer(): + a = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') + base = rffi.cast(lltype.Signed, a) + lst = [123, 456, 789] + tr = make_boehm_tracer(base, lst) + assert a[0] == 123 + assert a[1] == 456 + assert a[2] == 789 + assert tr is lst + lltype.free(a, flavor='raw') diff --git a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py deleted file mode 100644 --- a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py +++ /dev/null @@ -1,149 +0,0 @@ -from test_rewrite import get_size_descr, get_array_descr, get_description, BaseFakeCPU -from rpython.jit.backend.llsupport.descr import get_size_descr,\ - get_field_descr, get_array_descr, ArrayDescr, FieldDescr,\ - SizeDescr, 
get_interiorfield_descr -from rpython.jit.backend.llsupport.gc import GcLLDescr_boehm,\ - GcLLDescr_framework, MovableObjectTracker -from rpython.jit.backend.llsupport import jitframe, gc -from rpython.jit.metainterp.gc import get_description -from rpython.jit.tool.oparser import parse -from rpython.jit.metainterp.optimizeopt.util import equaloplists -from rpython.jit.metainterp.history import JitCellToken, FLOAT -from rpython.rtyper.lltypesystem import lltype, rffi, lltype, llmemory -from rpython.rtyper import rclass -from rpython.jit.backend.x86.arch import WORD -from rpython.rlib import rgc - -class Evaluator(object): - def __init__(self, scope): - self.scope = scope - def __getitem__(self, key): - return eval(key, self.scope) - - -class FakeLoopToken(object): - pass - -# The following class is based on rpython.jit.backend.llsupport.test.test_rewrite.RewriteTests. -# It's modified to be able to test the object pinning specific features. -class RewriteTests(object): - def check_rewrite(self, frm_operations, to_operations, **namespace): - # objects to use inside the test - A = lltype.GcArray(lltype.Signed) - adescr = get_array_descr(self.gc_ll_descr, A) - adescr.tid = 4321 - alendescr = adescr.lendescr - # - pinned_obj_type = lltype.GcStruct('PINNED_STRUCT', ('my_int', lltype.Signed)) - pinned_obj_my_int_descr = get_field_descr(self.gc_ll_descr, pinned_obj_type, 'my_int') - pinned_obj_ptr = lltype.malloc(pinned_obj_type) - pinned_obj_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, pinned_obj_ptr) - assert rgc.pin(pinned_obj_gcref) - # - notpinned_obj_type = lltype.GcStruct('NOT_PINNED_STRUCT', ('my_int', lltype.Signed)) - notpinned_obj_my_int_descr = get_field_descr(self.gc_ll_descr, notpinned_obj_type, 'my_int') - notpinned_obj_ptr = lltype.malloc(notpinned_obj_type) - notpinned_obj_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, notpinned_obj_ptr) - # - ptr_array_descr = self.cpu.arraydescrof(MovableObjectTracker.ptr_array_type) - # - vtable_descr = 
self.gc_ll_descr.fielddescr_vtable - O = lltype.GcStruct('O', ('parent', rclass.OBJECT), - ('x', lltype.Signed)) - o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) - # - tiddescr = self.gc_ll_descr.fielddescr_tid - wbdescr = self.gc_ll_descr.write_barrier_descr - WORD = globals()['WORD'] - # - strdescr = self.gc_ll_descr.str_descr - unicodedescr = self.gc_ll_descr.unicode_descr - strlendescr = strdescr.lendescr - unicodelendescr = unicodedescr.lendescr - - casmdescr = JitCellToken() - clt = FakeLoopToken() - clt._ll_initial_locs = [0, 8] - frame_info = lltype.malloc(jitframe.JITFRAMEINFO, flavor='raw') - clt.frame_info = frame_info - frame_info.jfi_frame_depth = 13 - frame_info.jfi_frame_size = 255 - framedescrs = self.gc_ll_descr.getframedescrs(self.cpu) - framelendescr = framedescrs.arraydescr.lendescr - jfi_frame_depth = framedescrs.jfi_frame_depth - jfi_frame_size = framedescrs.jfi_frame_size - jf_frame_info = framedescrs.jf_frame_info - signedframedescr = self.cpu.signedframedescr - floatframedescr = self.cpu.floatframedescr - casmdescr.compiled_loop_token = clt - tzdescr = None # noone cares - # - namespace.update(locals()) - # - for funcname in self.gc_ll_descr._generated_functions: - namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname) - namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, - '%s_descr' % funcname) - # - ops = parse(frm_operations, namespace=namespace) - operations = self.gc_ll_descr.rewrite_assembler(self.cpu, - ops.operations, - []) - # make the array containing the GCREF's accessible inside the tests. - # This must be done after we call 'rewrite_assembler'. Before that - # call 'last_moving_obj_tracker' is None or filled with some old - # value. 
- namespace['ptr_array_gcref'] = self.gc_ll_descr.last_moving_obj_tracker.ptr_array_gcref - expected = parse(to_operations % Evaluator(namespace), - namespace=namespace) - equaloplists(operations, expected.operations) - lltype.free(frame_info, flavor='raw') - -class TestFramework(RewriteTests): - def setup_method(self, meth): - class config_(object): - class translation(object): - gc = 'minimark' - gcrootfinder = 'asmgcc' - gctransformer = 'framework' - gcremovetypeptr = False - gcdescr = get_description(config_) - self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, - really_not_translated=True) - self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( - lambda cpu: True) - # - class FakeCPU(BaseFakeCPU): - def sizeof(self, STRUCT, is_object): - descr = SizeDescr(104) - descr.tid = 9315 - descr.vtable = 12 - return descr - self.cpu = FakeCPU() - - def test_simple_getfield(self): - self.check_rewrite(""" - [] - i0 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) - """, """ - [] - p1 = gc_load_r(ConstPtr(ptr_array_gcref), %(0 * ptr_array_descr.itemsize + 1)s, %(ptr_array_descr.itemsize)s) - i0 = gc_load_i(p1, 0, -%(pinned_obj_my_int_descr.field_size)s) - """) - assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 1 - - def test_simple_getfield_twice(self): - self.check_rewrite(""" - [] - i0 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) - i1 = getfield_gc_i(ConstPtr(notpinned_obj_gcref), descr=notpinned_obj_my_int_descr) - i2 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) - """, """ - [] - p1 = gc_load_r(ConstPtr(ptr_array_gcref), %(0 * ptr_array_descr.itemsize + 1)s, %(ptr_array_descr.itemsize)s) - i0 = gc_load_i(p1, 0, -%(pinned_obj_my_int_descr.field_size)s) - i1 = gc_load_i(ConstPtr(notpinned_obj_gcref), 0, -%(notpinned_obj_my_int_descr.field_size)s) - p2 = gc_load_r(ConstPtr(ptr_array_gcref), %(1 * ptr_array_descr.itemsize + 1)s, 
%(ptr_array_descr.itemsize)s) - i2 = gc_load_i(p2, 0, -%(pinned_obj_my_int_descr.field_size)s) - """) - assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 2 diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -10,7 +10,7 @@ from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.metainterp.history import JitCellToken, FLOAT from rpython.jit.metainterp.history import AbstractFailDescr -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper import rclass from rpython.jit.backend.x86.arch import WORD from rpython.jit.backend.llsupport.symbolic import (WORD, @@ -77,6 +77,9 @@ tdescr = get_size_descr(self.gc_ll_descr, T) tdescr.tid = 5678 tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + myT = lltype.cast_opaque_ptr(llmemory.GCREF, + lltype.malloc(T, zero=True)) + self.myT = myT # A = lltype.GcArray(lltype.Signed) adescr = get_array_descr(self.gc_ll_descr, A) @@ -112,6 +115,12 @@ xdescr = get_field_descr(self.gc_ll_descr, R1, 'x') ydescr = get_field_descr(self.gc_ll_descr, R1, 'y') zdescr = get_field_descr(self.gc_ll_descr, R1, 'z') + myR1 = lltype.cast_opaque_ptr(llmemory.GCREF, + lltype.malloc(R1, zero=True)) + myR1b = lltype.cast_opaque_ptr(llmemory.GCREF, + lltype.malloc(R1, zero=True)) + self.myR1 = myR1 + self.myR1b = myR1b # E = lltype.GcStruct('Empty') edescr = get_size_descr(self.gc_ll_descr, E) @@ -174,9 +183,10 @@ ops = parse(frm_operations, namespace=namespace) expected = parse(to_operations % Evaluator(namespace), namespace=namespace) + self.gcrefs = [] operations = self.gc_ll_descr.rewrite_assembler(self.cpu, ops.operations, - []) + self.gcrefs) remap = {} for a, b in zip(ops.inputargs, expected.inputargs): remap[b] = a @@ -1281,3 +1291,124 @@ {t} jump() 
""".format(**locals())) + + def test_load_from_gc_table_1i(self): + self.check_rewrite(""" + [i1] + setfield_gc(ConstPtr(myR1), i1, descr=xdescr) + jump() + """, """ + [i1] + p0 = load_from_gc_table(0) + gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) + jump() + """) + assert self.gcrefs == [self.myR1] + + def test_load_from_gc_table_1p(self): + self.check_rewrite(""" + [p1] + setfield_gc(ConstPtr(myT), p1, descr=tzdescr) + jump() + """, """ + [i1] + p0 = load_from_gc_table(0) + cond_call_gc_wb(p0, descr=wbdescr) + gc_store(p0, %(tzdescr.offset)s, i1, %(tzdescr.field_size)s) + jump() + """) + assert self.gcrefs == [self.myT] + + def test_load_from_gc_table_2(self): + self.check_rewrite(""" + [i1, f2] + setfield_gc(ConstPtr(myR1), i1, descr=xdescr) + setfield_gc(ConstPtr(myR1), f2, descr=ydescr) + jump() + """, """ + [i1, f2] + p0 = load_from_gc_table(0) + gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) + gc_store(p0, %(ydescr.offset)s, f2, %(ydescr.field_size)s) + jump() + """) + assert self.gcrefs == [self.myR1] + + def test_load_from_gc_table_3(self): + self.check_rewrite(""" + [i1, f2] + setfield_gc(ConstPtr(myR1), i1, descr=xdescr) + label(f2) + setfield_gc(ConstPtr(myR1), f2, descr=ydescr) + jump() + """, """ + [i1, f2] + p0 = load_from_gc_table(0) + gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) + label(f2) + p1 = load_from_gc_table(0) + gc_store(p1, %(ydescr.offset)s, f2, %(ydescr.field_size)s) + jump() + """) + assert self.gcrefs == [self.myR1] + + def test_load_from_gc_table_4(self): + self.check_rewrite(""" + [i1, f2] + setfield_gc(ConstPtr(myR1), i1, descr=xdescr) + setfield_gc(ConstPtr(myR1b), f2, descr=ydescr) + jump() + """, """ + [i1, f2] + p0 = load_from_gc_table(0) + gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) + p1 = load_from_gc_table(1) + gc_store(p1, %(ydescr.offset)s, f2, %(ydescr.field_size)s) + jump() + """) + assert self.gcrefs == [self.myR1, self.myR1b] + + def 
test_pinned_simple_getfield(self): + # originally in test_pinned_object_rewrite; now should give the + # same result for pinned objects and for normal objects + self.check_rewrite(""" + [] + i0 = getfield_gc_i(ConstPtr(myR1), descr=xdescr) + """, """ + [] + p1 = load_from_gc_table(0) + i0 = gc_load_i(p1, %(xdescr.offset)s, -%(xdescr.field_size)s) + """) + assert self.gcrefs == [self.myR1] + + def test_pinned_simple_getfield_twice(self): + # originally in test_pinned_object_rewrite; now should give the + # same result for pinned objects and for normal objects + self.check_rewrite(""" + [] + i0 = getfield_gc_i(ConstPtr(myR1), descr=xdescr) + i1 = getfield_gc_i(ConstPtr(myR1b), descr=xdescr) + i2 = getfield_gc_i(ConstPtr(myR1), descr=xdescr) + """, """ + [] + p1 = load_from_gc_table(0) + i0 = gc_load_i(p1, %(xdescr.offset)s, -%(xdescr.field_size)s) + p2 = load_from_gc_table(1) + i1 = gc_load_i(p2, %(xdescr.offset)s, -%(xdescr.field_size)s) + i2 = gc_load_i(p1, %(xdescr.offset)s, -%(xdescr.field_size)s) + """) + assert self.gcrefs == [self.myR1, self.myR1b] + + def test_guard_in_gcref(self): + self.check_rewrite(""" + [i1, i2] + guard_true(i1) [] + guard_true(i2) [] + jump() + """, """ + [i1, i2] + guard_true(i1) [] + guard_true(i2) [] + jump() + """) + assert len(self.gcrefs) == 2 diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -285,7 +285,7 @@ class CompiledLoopToken(object): asmmemmgr_blocks = None - asmmemmgr_gcroots = 0 + asmmemmgr_gcreftracers = None def __init__(self, cpu, number): cpu.tracker.total_compiled_loops += 1 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -4,7 +4,7 @@ from rpython.jit.backend.llsupport import symbolic, jitframe, rewrite from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, - 
DEBUG_COUNTER, debug_bridge) + DEBUG_COUNTER) from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.metainterp.history import (Const, VOID, ConstInt) @@ -489,7 +489,6 @@ frame_info = self.datablockwrapper.malloc_aligned( jitframe.JITFRAMEINFO_SIZE, alignment=WORD) clt.frame_info = rffi.cast(jitframe.JITFRAMEINFOPTR, frame_info) - clt.allgcrefs = [] clt.frame_info.clear() # for now if log: @@ -498,10 +497,13 @@ regalloc = RegAlloc(self, self.cpu.translate_support_code) # + allgcrefs = [] + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, allgcrefs) + self.reserve_gcref_table(allgcrefs) + functionpos = self.mc.get_relative_pos() self._call_header_with_stack_check() self._check_frame_depth_debug(self.mc) - operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) @@ -512,6 +514,7 @@ full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) + self.patch_gcref_table(looptoken, rawstart) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) looptoken._ll_loop_code = looppos + rawstart @@ -520,7 +523,13 @@ looptoken.number, loopname, r_uint(rawstart + looppos), r_uint(rawstart + size_excluding_failure_stuff), - r_uint(rawstart))) + r_uint(rawstart + functionpos))) + debug_print(" gc table: 0x%x" % r_uint(self.gc_table_addr)) + debug_print(" function: 0x%x" % r_uint(rawstart + functionpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + looppos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + + size_excluding_failure_stuff)) + debug_print(" end: 0x%x" % r_uint(rawstart + full_size)) debug_stop("jit-backend-addr") self.patch_pending_failure_recoveries(rawstart) # @@ -530,7 +539,7 @@ looptoken._x86_rawstart = rawstart looptoken._x86_fullsize = 
full_size looptoken._x86_ops_offset = ops_offset - looptoken._ll_function_addr = rawstart + looptoken._ll_function_addr = rawstart + functionpos if logger: logger.log_loop(inputargs, operations, 0, "rewritten", name=loopname, ops_offset=ops_offset) @@ -563,11 +572,13 @@ 'b', descr_number) arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = RegAlloc(self, self.cpu.translate_support_code) - startpos = self.mc.get_relative_pos() + allgcrefs = [] operations = regalloc.prepare_bridge(inputargs, arglocs, operations, - self.current_clt.allgcrefs, + allgcrefs, self.current_clt.frame_info) + self.reserve_gcref_table(allgcrefs) + startpos = self.mc.get_relative_pos() self._check_frame_depth(self.mc, regalloc.get_gcmap()) bridgestartpos = self.mc.get_relative_pos() self._update_at_exit(arglocs, inputargs, faildescr, regalloc) @@ -577,12 +588,22 @@ fullsize = self.mc.get_relative_pos() # rawstart = self.materialize_loop(original_loop_token) + self.patch_gcref_table(original_loop_token, rawstart) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) - debug_bridge(descr_number, rawstart, codeendpos) + debug_start("jit-backend-addr") + debug_print("bridge out of Guard 0x%x has address 0x%x to 0x%x" % + (r_uint(descr_number), r_uint(rawstart + startpos), + r_uint(rawstart + codeendpos))) + debug_print(" gc table: 0x%x" % r_uint(self.gc_table_addr)) + debug_print(" jump target: 0x%x" % r_uint(rawstart + startpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + bridgestartpos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + codeendpos)) + debug_print(" end: 0x%x" % r_uint(rawstart + fullsize)) + debug_stop("jit-backend-addr") self.patch_pending_failure_recoveries(rawstart) # patch the jump from original guard - self.patch_jump_for_descr(faildescr, rawstart) + self.patch_jump_for_descr(faildescr, rawstart + startpos) ops_offset = self.mc.ops_offset frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, 
frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) @@ -667,6 +688,39 @@ mc.JMP_r(X86_64_SCRATCH_REG.value) mc.copy_to_raw_memory(adr_jump_offset) + def reserve_gcref_table(self, allgcrefs): + gcref_table_size = len(allgcrefs) * WORD + if IS_X86_64: + # align to a multiple of 16 and reserve space at the beginning + # of the machine code for the gc table. This lets us write + # machine code with relative addressing (%rip - constant). + gcref_table_size = (gcref_table_size + 15) & ~15 + mc = self.mc + assert mc.get_relative_pos() == 0 + for i in range(gcref_table_size): + mc.writechar('\x00') + elif IS_X86_32: + # allocate the gc table right now. This lets us write + # machine code with absolute 32-bit addressing. + self.gc_table_addr = self.datablockwrapper.malloc_aligned( + gcref_table_size, alignment=WORD) + # + self.setup_gcrefs_list(allgcrefs) + + def patch_gcref_table(self, looptoken, rawstart): + if IS_X86_64: + # the gc table is at the start of the machine code + self.gc_table_addr = rawstart + elif IS_X86_32: + # the gc table was already allocated by reserve_gcref_table() + rawstart = self.gc_table_addr + # + tracer = self.cpu.gc_ll_descr.make_gcref_tracer(rawstart, + self._allgcrefs) + gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) + gcreftracers.append(tracer) # keepalive + self.teardown_gcrefs_list() + def write_pending_failure_recoveries(self, regalloc): # for each pending guard, generate the code of the recovery stub # at the end of self.mc. 
@@ -790,6 +844,12 @@ clt.asmmemmgr_blocks = [] return clt.asmmemmgr_blocks + def get_asmmemmgr_gcreftracers(self, looptoken): + clt = looptoken.compiled_loop_token + if clt.asmmemmgr_gcreftracers is None: + clt.asmmemmgr_gcreftracers = [] + return clt.asmmemmgr_gcreftracers + def materialize_loop(self, looptoken): self.datablockwrapper.done() # finish using cpu.asmmemmgr self.datablockwrapper = None @@ -1368,6 +1428,29 @@ genop_cast_ptr_to_int = _genop_same_as genop_cast_int_to_ptr = _genop_same_as + def _patch_load_from_gc_table(self, index): + # must be called immediately after a "p"-mode instruction + # has been emitted. 64-bit mode only. + assert IS_X86_64 + address_in_buffer = index * WORD # at the start of the buffer + p_location = self.mc.get_relative_pos() + offset = address_in_buffer - p_location + self.mc.overwrite32(p_location-4, offset) + + def _addr_from_gc_table(self, index): + # get the address of the gc table entry 'index'. 32-bit mode only. + assert IS_X86_32 + return self.gc_table_addr + index * WORD + + def genop_load_from_gc_table(self, op, arglocs, resloc): + index = op.getarg(0).getint() + assert isinstance(resloc, RegLoc) + if IS_X86_64: + self.mc.MOV_rp(resloc.value, 0) # %rip-relative + self._patch_load_from_gc_table(index) + elif IS_X86_32: + self.mc.MOV_rj(resloc.value, self._addr_from_gc_table(index)) + def genop_int_force_ge_zero(self, op, arglocs, resloc): self.mc.TEST(arglocs[0], arglocs[0]) self.mov(imm0, resloc) @@ -1843,8 +1926,9 @@ def implement_guard_recovery(self, guard_opnum, faildescr, failargs, fail_locs, frame_depth): gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + faildescrindex = self.get_gcref_from_faildescr(faildescr) return GuardToken(self.cpu, gcmap, faildescr, failargs, fail_locs, - guard_opnum, frame_depth) + guard_opnum, frame_depth, faildescrindex) def generate_propagate_error_64(self): assert WORD == 8 @@ -1862,8 +1946,12 @@ self._update_at_exit(guardtok.fail_locs, guardtok.failargs, 
guardtok.faildescr, regalloc) # - fail_descr, target = self.store_info_on_descr(startpos, guardtok) - self.mc.PUSH(imm(fail_descr)) + faildescrindex, target = self.store_info_on_descr(startpos, guardtok) + if IS_X86_64: + self.mc.PUSH_p(0) # %rip-relative + self._patch_load_from_gc_table(faildescrindex) + elif IS_X86_32: + self.mc.PUSH_j(self._addr_from_gc_table(faildescrindex)) self.push_gcmap(self.mc, guardtok.gcmap, push=True) self.mc.JMP(imm(target)) return startpos @@ -1967,17 +2055,24 @@ def genop_finish(self, op, arglocs, result_loc): base_ofs = self.cpu.get_baseofs_of_frame_field() - if len(arglocs) == 2: - [return_val, fail_descr_loc] = arglocs + if len(arglocs) > 0: + [return_val] = arglocs if op.getarg(0).type == FLOAT and not IS_X86_64: size = WORD * 2 else: size = WORD self.save_into_mem(raw_stack(base_ofs), return_val, imm(size)) - else: - [fail_descr_loc] = arglocs ofs = self.cpu.get_ofs_of_frame_field('jf_descr') - self.mov(fail_descr_loc, RawEbpLoc(ofs)) + + descr = op.getdescr() + faildescrindex = self.get_gcref_from_faildescr(descr) + if IS_X86_64: + self.mc.MOV_rp(eax.value, 0) + self._patch_load_from_gc_table(faildescrindex) + elif IS_X86_32: + self.mc.MOV_rj(eax.value, self._addr_from_gc_table(faildescrindex)) + self.mov(eax, RawEbpLoc(ofs)) + arglist = op.getarglist() if arglist and arglist[0].type == REF: if self._finish_gcmap: @@ -2047,8 +2142,16 @@ guard_op.getopnum() == rop.GUARD_NOT_FORCED_2) faildescr = guard_op.getdescr() ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - self.mc.MOV(raw_stack(ofs), imm(rffi.cast(lltype.Signed, - cast_instance_to_gcref(faildescr)))) + + faildescrindex = self.get_gcref_from_faildescr(faildescr) + if IS_X86_64: + self.mc.MOV_rp(X86_64_SCRATCH_REG.value, 0) + self._patch_load_from_gc_table(faildescrindex) + self.mc.MOV(raw_stack(ofs), X86_64_SCRATCH_REG) + elif IS_X86_32: + # XXX need a scratch reg here for efficiency; be more clever + self.mc.PUSH_j(self._addr_from_gc_table(faildescrindex)) + 
self.mc.POP(raw_stack(ofs)) def _find_nearby_operation(self, delta): regalloc = self._regalloc diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -423,16 +423,11 @@ def consider_finish(self, op): # the frame is in ebp, but we have to point where in the frame is # the potential argument to FINISH - descr = op.getdescr() - fail_descr = cast_instance_to_gcref(descr) - # we know it does not move, but well - rgc._make_sure_does_not_move(fail_descr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) - locs = [loc, imm(fail_descr)] + locs = [loc] else: - locs = [imm(fail_descr)] + locs = [] self.perform(op, locs, None) def consider_guard_no_exception(self, op): @@ -1141,6 +1136,10 @@ consider_same_as_r = _consider_same_as consider_same_as_f = _consider_same_as + def consider_load_from_gc_table(self, op): + resloc = self.rm.force_allocate_reg(op) + self.perform(op, [], resloc) + def consider_int_force_ge_zero(self, op): argloc = self.make_sure_var_in_reg(op.getarg(0)) resloc = self.force_allocate_reg(op, [op.getarg(0)]) diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -297,6 +297,20 @@ return encode_abs, argnum, None, None # ____________________________________________________________ +# ***X86_64 only*** +# Emit a mod/rm referencing an address "RIP + immediate_offset". 
+ + at specialize.arg(2) +def encode_rip_offset(mc, immediate, _, orbyte): + assert mc.WORD == 8 + mc.writechar(chr(0x05 | orbyte)) + mc.writeimm32(immediate) + return 0 + +def rip_offset(argnum): + return encode_rip_offset, argnum, None, None + +# ____________________________________________________________ # For 64-bits mode: the REX.W, REX.R, REX.X, REG.B prefixes REX_W = 8 @@ -586,6 +600,8 @@ PUS1_r = insn(rex_nw, register(1), '\x50') PUS1_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) PUS1_m = insn(rex_nw, '\xFF', orbyte(6<<3), mem_reg_plus_const(1)) + PUS1_j = insn(rex_nw, '\xFF', orbyte(6<<3), abs_(1)) + PUS1_p = insn(rex_nw, '\xFF', orbyte(6<<3), rip_offset(1)) PUS1_i8 = insn('\x6A', immediate(1, 'b')) PUS1_i32 = insn('\x68', immediate(1, 'i')) @@ -608,6 +624,14 @@ self.PUS1_i32(immed) self.stack_frame_size_delta(+self.WORD) + def PUSH_j(self, abs_addr): + self.PUS1_j(abs_addr) + self.stack_frame_size_delta(+self.WORD) + + def PUSH_p(self, rip_offset): + self.PUS1_p(rip_offset) + self.stack_frame_size_delta(+self.WORD) + PO1_r = insn(rex_nw, register(1), '\x58') PO1_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1)) @@ -914,6 +938,7 @@ add_insn('m', mem_reg_plus_const(modrm_argnum)) add_insn('a', mem_reg_plus_scaled_reg_plus_const(modrm_argnum)) add_insn('j', abs_(modrm_argnum)) + add_insn('p', rip_offset(modrm_argnum)) # Define a regular MOV, and a variant MOV32 that only uses the low 4 bytes of a # register diff --git a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -279,6 +279,8 @@ if modes: tests = self.get_all_tests() m = modes[0] + if m == 'p' and self.WORD == 4: + return [] lst = tests[m]() random.shuffle(lst) if methname == 'PSRAD_xi' and m == 'i': diff --git a/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py 
b/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -51,3 +51,19 @@ def test_extra_MOV_ri64(self): self.imm32_tests = self.imm64_tests # patch on 'self' self.complete_test('MOV_ri') + + def rip_relative_tests(self): + return [-0x80000000, 0x7FFFFFFF, 128, 256, -129, -255, 0, 127] + + def get_all_tests(self): + d = super(TestRx86_64, self).get_all_tests() + d['p'] = self.rip_relative_tests + return d + + def assembler_operand_rip_relative(self, value): + return '%d(%%rip)' % value + + def get_all_assembler_operands(self): + d = super(TestRx86_64, self).get_all_assembler_operands() + d['p'] = self.assembler_operand_rip_relative + return d diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -408,6 +408,7 @@ rop.GC_LOAD_INDEXED_R, rop.GC_STORE, rop.GC_STORE_INDEXED, + rop.LOAD_FROM_GC_TABLE, ): # list of opcodes never executed by pyjitpl continue if rop._VEC_PURE_FIRST <= value <= rop._VEC_PURE_LAST: diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1056,6 +1056,8 @@ 'UNICODELEN/1/i', 'UNICODEGETITEM/2/i', # + 'LOAD_FROM_GC_TABLE/1/r', # only emitted by rewrite.py + # '_ALWAYS_PURE_LAST', # ----- end of always_pure operations ----- # parameters GC_LOAD From pypy.commits at gmail.com Sat Apr 9 10:17:18 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 09 Apr 2016 07:17:18 -0700 (PDT) Subject: [pypy-commit] pypy default: Document branches Message-ID: <57090eee.a2f2c20a.320cd.5ef5@mx.google.com> Author: Armin Rigo Branch: Changeset: r83594:a28233b58867 Date: 2016-04-09 17:16 +0300 http://bitbucket.org/pypy/pypy/changeset/a28233b58867/ Log: Document branches diff --git 
a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -40,3 +40,15 @@ .. branch: rposix-for-3 Wrap more POSIX functions in `rpython.rlib.rposix`. + +.. branch: cleanup-history-rewriting + +A local clean-up in the JIT front-end. + +.. branch: jit-constptr-2 + +Remove the forced minor collection that occurs when rewriting the +assembler at the start of the JIT backend. This is done by emitting +the ConstPtrs in a separate table, and loading from the table. It +gives improved warm-up time and memory usage, and also removes +annoying special-purpose code for pinned pointers. From pypy.commits at gmail.com Sat Apr 9 10:17:14 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 09 Apr 2016 07:17:14 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: ready to merge Message-ID: <57090eea.2457c20a.61d0d.6178@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83592:3e5d622b62c4 Date: 2016-04-09 17:10 +0300 http://bitbucket.org/pypy/pypy/changeset/3e5d622b62c4/ Log: ready to merge From pypy.commits at gmail.com Sat Apr 9 13:36:15 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 09 Apr 2016 10:36:15 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: reading the intel optimization manual Message-ID: <57093d8f.839a1c0a.67c51.ffffebf9@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5635:ff4bc734329a Date: 2016-04-09 20:36 +0300 http://bitbucket.org/pypy/extradoc/changeset/ff4bc734329a/ Log: reading the intel optimization manual diff --git a/planning/misc.txt b/planning/misc.txt --- a/planning/misc.txt +++ b/planning/misc.txt @@ -9,5 +9,50 @@ virtualizables are a mess of loads/stores in the jit traces modulo is very bad; "x % (2**n)" should be improved even if x might be -negative. Think also about "x % C" for a general C? +negative. Think also about "x % C" for a general C? 
(Fwiw, a 64-bit +IDIV instruction might be worse than a 32-bit IDIV, but unsure we can +use that.) Maybe tweak RPython so that the Python-level "%" is the +basic llop handled by the JIT (so far it's turned into the C-level "%" +before the codewriter sees the code). +branch prediction: in the jit assembler, write the common path +(e.g. write barriers) such that it is a fall-through, and move +the slow-path code further down + +Micro-fusion: using e.g. "cmp [rax+32],0" is better than two +instructions "mov rdx,[rax+32]; cmp rdx, 0". Also applies to "add +rdx,[rax+32]". *Does not work* with "call [rip+1234]" because it is a +control flow operation using rip-based addressing; unclear how it +compares with "mov r11,<64-bit-const>; call r11". + +Macro-fusion: a "cmp" or "test" immediately followed by a conditional +jump. Works also if the "cmp" or "test" is a reg-mem. *Does not +work* if it is a mem-immediate. It is better to first load the value +in a register. + +Avoid putting references to rsp close to pop/push/call/ret +instructions. + +"lea" is slow in the following forms: + [base+index+offset] with all three operands present + [rbp+index], [r13+index] (because the +0 is always present then) + [rip+offset] + +e.g. replace "lea rsi,[rsi+rdx+1]" by "lea rsi,[rsi+rdx]; lea +rsi,[rsi+1]". + +multibyte NOPs are not full NOPs: pick the register arguments +carefully to reduce dependencies + +when floating-point operations are bitwise equivalent, use the xxxPS +version instead of the xxxPD version. But don't mix integer +operations (e.g. PXOR) and floating-point operations (e.g. XORPS). + +for small loops, check that we spill loop invariants in preference +over spilling non-loop-invariants. + +if a value in a register dies, try to overwrite this register quickly +instead of writing to an old register? 
+ +avoid MOVSD/MOVSS between registers; do a full copy with MOVAPD or +MOVDQA From pypy.commits at gmail.com Sat Apr 9 15:09:41 2016 From: pypy.commits at gmail.com (fijal) Date: Sat, 09 Apr 2016 12:09:41 -0700 (PDT) Subject: [pypy-commit] pypy default: kill the depthmap calculation - it's unused by anything and takes memory. Leave the frame size calculations, so we can assert things (why not) Message-ID: <57095375.4c981c0a.e0cb1.06ad@mx.google.com> Author: fijal Branch: Changeset: r83595:cea17113ade7 Date: 2016-04-09 20:09 +0100 http://bitbucket.org/pypy/pypy/changeset/cea17113ade7/ Log: kill the depthmap calculation - it's unused by anything and takes memory. Leave the frame size calculations, so we can assert things (why not) diff --git a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py --- a/rpython/jit/backend/llsupport/asmmemmgr.py +++ b/rpython/jit/backend/llsupport/asmmemmgr.py @@ -216,9 +216,6 @@ gcroot_markers = None - frame_positions = None - frame_assignments = None - def __init__(self, translated=None): if translated is None: translated = we_are_translated() @@ -323,12 +320,6 @@ assert gcrootmap is not None for pos, mark in self.gcroot_markers: gcrootmap.register_asm_addr(rawstart + pos, mark) - if cpu.HAS_CODEMAP: - cpu.codemap.register_frame_depth_map(rawstart, rawstart + size, - self.frame_positions, - self.frame_assignments) - self.frame_positions = None - self.frame_assignments = None return rawstart def _become_a_plain_block_builder(self): diff --git a/rpython/jit/backend/llsupport/codemap.py b/rpython/jit/backend/llsupport/codemap.py --- a/rpython/jit/backend/llsupport/codemap.py +++ b/rpython/jit/backend/llsupport/codemap.py @@ -41,10 +41,6 @@ RPY_EXTERN long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, long *current_pos_addr); -RPY_EXTERN long pypy_jit_depthmap_add(unsigned long addr, unsigned int size, - unsigned int stackdepth); -RPY_EXTERN void pypy_jit_depthmap_clear(unsigned long addr, 
unsigned int size); - """], separate_module_sources=[ open(os.path.join(srcdir, 'skiplist.c'), 'r').read() + open(os.path.join(srcdir, 'codemap.c'), 'r').read() @@ -64,15 +60,6 @@ pypy_jit_codemap_firstkey = llexternal('pypy_jit_codemap_firstkey', [], lltype.Signed) -pypy_jit_depthmap_add = llexternal('pypy_jit_depthmap_add', - [lltype.Signed, lltype.Signed, - lltype.Signed], lltype.Signed) -pypy_jit_depthmap_clear = llexternal('pypy_jit_depthmap_clear', - [lltype.Signed, lltype.Signed], - lltype.Void) - -stack_depth_at_loc = llexternal('pypy_jit_stack_depth_at_loc', - [lltype.Signed], lltype.Signed) find_codemap_at_addr = llexternal('pypy_find_codemap_at_addr', [lltype.Signed, rffi.CArrayPtr(lltype.Signed)], llmemory.Address) @@ -102,20 +89,6 @@ items = pypy_jit_codemap_del(start, stop - start) if items: lltype.free(items, flavor='raw', track_allocation=False) - pypy_jit_depthmap_clear(start, stop - start) - - def register_frame_depth_map(self, rawstart, rawstop, frame_positions, - frame_assignments): - if not frame_positions: - return - assert len(frame_positions) == len(frame_assignments) - for i in range(len(frame_positions)-1, -1, -1): - pos = rawstart + frame_positions[i] - length = rawstop - pos - if length > 0: - #print "ADD:", pos, length, frame_assignments[i] - pypy_jit_depthmap_add(pos, length, frame_assignments[i]) - rawstop = pos def register_codemap(self, (start, size, l)): items = lltype.malloc(INT_LIST_PTR.TO, len(l), flavor='raw', diff --git a/rpython/jit/backend/llsupport/src/codemap.c b/rpython/jit/backend/llsupport/src/codemap.c --- a/rpython/jit/backend/llsupport/src/codemap.c +++ b/rpython/jit/backend/llsupport/src/codemap.c @@ -139,78 +139,3 @@ current_pos = data->bytecode_info[current_pos + 3]; } } - -/************************************************************/ -/*** depthmap storage ***/ -/************************************************************/ - -typedef struct { - unsigned int block_size; - unsigned int stack_depth; -} 
depthmap_data_t; - -static skipnode_t jit_depthmap_head; - -/*** interface used from codemap.py ***/ - -RPY_EXTERN -long pypy_jit_depthmap_add(unsigned long addr, unsigned int size, - unsigned int stackdepth) -{ - skipnode_t *new = skiplist_malloc(sizeof(depthmap_data_t)); - depthmap_data_t *data; - if (new == NULL) - return -1; /* too bad */ - - new->key = addr; - data = (depthmap_data_t *)new->data; - data->block_size = size; - data->stack_depth = stackdepth; - - pypy_codemap_invalid_set(1); - skiplist_insert(&jit_depthmap_head, new); - pypy_codemap_invalid_set(0); - return 0; -} - -RPY_EXTERN -void pypy_jit_depthmap_clear(unsigned long addr, unsigned int size) -{ - unsigned long search_key = addr + size - 1; - if (size == 0) - return; - - pypy_codemap_invalid_set(1); - while (1) { - /* search for all nodes belonging to the range, and remove them */ - skipnode_t *node = skiplist_search(&jit_depthmap_head, search_key); - if (node->key < addr) - break; /* exhausted */ - skiplist_remove(&jit_depthmap_head, node->key); - free(node); - } - pypy_codemap_invalid_set(0); -} - -/*** interface used from pypy/module/_vmprof ***/ - -RPY_EXTERN -long pypy_jit_stack_depth_at_loc(long loc) -{ - skipnode_t *depthmap = skiplist_search(&jit_depthmap_head, - (unsigned long)loc); - depthmap_data_t *data; - unsigned long rel_addr; - - if (depthmap == &jit_depthmap_head) - return -1; - - rel_addr = (unsigned long)loc - depthmap->key; - data = (depthmap_data_t *)depthmap->data; - if (rel_addr >= data->block_size) - return -1; - - return data->stack_depth; -} - -/************************************************************/ diff --git a/rpython/jit/backend/llsupport/test/test_codemap.py b/rpython/jit/backend/llsupport/test/test_codemap.py --- a/rpython/jit/backend/llsupport/test/test_codemap.py +++ b/rpython/jit/backend/llsupport/test/test_codemap.py @@ -1,6 +1,5 @@ from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.jit.backend.llsupport.codemap import stack_depth_at_loc 
from rpython.jit.backend.llsupport.codemap import CodemapStorage, \ CodemapBuilder, unpack_traceback, find_codemap_at_addr @@ -27,34 +26,6 @@ # codemap.free() -def test_find_jit_frame_depth(): - codemap = CodemapStorage() - codemap.setup() - codemap.register_frame_depth_map(11, 26, [0, 5, 10], [1, 2, 3]) - codemap.register_frame_depth_map(30, 41, [0, 5, 10], [4, 5, 6]) - codemap.register_frame_depth_map(0, 11, [0, 5, 10], [7, 8, 9]) - assert stack_depth_at_loc(13) == 1 - assert stack_depth_at_loc(-3) == -1 - assert stack_depth_at_loc(40) == 6 - assert stack_depth_at_loc(41) == -1 - assert stack_depth_at_loc(5) == 8 - assert stack_depth_at_loc(17) == 2 - assert stack_depth_at_loc(38) == 5 - assert stack_depth_at_loc(25) == 3 - assert stack_depth_at_loc(26) == -1 - assert stack_depth_at_loc(11) == 1 - assert stack_depth_at_loc(10) == 9 - codemap.free_asm_block(11, 26) - assert stack_depth_at_loc(11) == -1 - assert stack_depth_at_loc(13) == -1 - assert stack_depth_at_loc(-3) == -1 - assert stack_depth_at_loc(40) == 6 - assert stack_depth_at_loc(41) == -1 - assert stack_depth_at_loc(5) == 8 - assert stack_depth_at_loc(38) == 5 - assert stack_depth_at_loc(10) == 9 - codemap.free() - def test_free_with_alignment(): codemap = CodemapStorage() codemap.setup() diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -470,8 +470,6 @@ """Abstract base class.""" def __init__(self): - self.frame_positions = [] - self.frame_assignments = [] self.force_frame_size(self.WORD) def writechar(self, char): @@ -492,15 +490,11 @@ self.writechar(chr((imm >> 24) & 0xFF)) def force_frame_size(self, frame_size): - self.frame_positions.append(self.get_relative_pos()) - self.frame_assignments.append(frame_size) self._frame_size = frame_size def stack_frame_size_delta(self, delta): "Called when we generate an instruction that changes the value of ESP" self._frame_size += delta - 
self.frame_positions.append(self.get_relative_pos()) - self.frame_assignments.append(self._frame_size) assert self._frame_size >= self.WORD def check_stack_size_at_ret(self): diff --git a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h --- a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h +++ b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h @@ -2,7 +2,6 @@ void *pypy_find_codemap_at_addr(long addr, long *start_addr); long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, long *current_pos_addr); -long pypy_jit_stack_depth_at_loc(long loc); static long vmprof_write_header_for_jit_addr(intptr_t *result, long n, From pypy.commits at gmail.com Sun Apr 10 08:32:40 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 10 Apr 2016 05:32:40 -0700 (PDT) Subject: [pypy-commit] pypy default: Fix: this logic can result in an external call whose stderr is completely Message-ID: <570a47e8.0113c20a.8b306.ffffb1cb@mx.google.com> Author: Armin Rigo Branch: Changeset: r83596:418eb4be7f64 Date: 2016-04-10 15:32 +0300 http://bitbucket.org/pypy/pypy/changeset/418eb4be7f64/ Log: Fix: this logic can result in an external call whose stderr is completely eaten. Now we print the stderr, at least, and avoid caching results in that case. 
diff --git a/rpython/tool/gcc_cache.py b/rpython/tool/gcc_cache.py --- a/rpython/tool/gcc_cache.py +++ b/rpython/tool/gcc_cache.py @@ -1,5 +1,5 @@ from hashlib import md5 -import py, os +import py, os, sys def cache_file_path(c_files, eci, cachename): "Builds a filename to cache compilation data" @@ -26,6 +26,8 @@ if ignore_errors: platform.log_errors = False result = platform.execute(platform.compile(c_files, eci)) + if result.err: + sys.stderr.write(result.err) finally: if ignore_errors: del platform.log_errors @@ -33,7 +35,8 @@ # compare equal to another instance without it if platform.log_errors != _previous: platform.log_errors = _previous - try_atomic_write(path, result.out) + if not result.err: + try_atomic_write(path, result.out) return result.out def try_atomic_write(path, data): diff --git a/rpython/tool/test/test_gcc_cache.py b/rpython/tool/test/test_gcc_cache.py --- a/rpython/tool/test/test_gcc_cache.py +++ b/rpython/tool/test/test_gcc_cache.py @@ -93,3 +93,24 @@ finally: sys.stderr = oldstderr assert 'ERROR' not in capture.getvalue().upper() + +def test_execute_code_show_runtime_error(): + f = localudir.join('z.c') + f.write(""" + #include + int main() + { + fprintf(stderr, "hello\\n"); + return 0; + } + """) + for i in range(2): + eci = ExternalCompilationInfo() + oldstderr = sys.stderr + try: + sys.stderr = capture = cStringIO.StringIO() + output = build_executable_cache([f], eci, True) + finally: + sys.stderr = oldstderr + assert 'hello' in capture.getvalue() + assert output == '' From pypy.commits at gmail.com Sun Apr 10 14:15:14 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 10 Apr 2016 11:15:14 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: merge default into branch Message-ID: <570a9832.0173c20a.d3d86.2fd7@mx.google.com> Author: mattip Branch: stdlib-2.7.11 Changeset: r83597:72454c4374d2 Date: 2016-04-10 21:14 +0300 http://bitbucket.org/pypy/pypy/changeset/72454c4374d2/ Log: merge default into branch diff too long, truncating 
to 2000 out of 13183 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -19,3 +19,4 @@ 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 +bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1 diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -67,7 +67,8 @@ subvalue = subfield.ctype fields[subname] = Field(subname, relpos, subvalue._sizeofinstances(), - subvalue, i, is_bitfield) + subvalue, i, is_bitfield, + inside_anon_field=fields[name]) else: resnames.append(name) names = resnames @@ -77,13 +78,15 @@ class Field(object): - def __init__(self, name, offset, size, ctype, num, is_bitfield): + def __init__(self, name, offset, size, ctype, num, is_bitfield, + inside_anon_field=None): self.__dict__['name'] = name self.__dict__['offset'] = offset self.__dict__['size'] = size self.__dict__['ctype'] = ctype self.__dict__['num'] = num self.__dict__['is_bitfield'] = is_bitfield + self.__dict__['inside_anon_field'] = inside_anon_field def __setattr__(self, name, value): raise AttributeError(name) @@ -95,6 +98,8 @@ def __get__(self, obj, cls=None): if obj is None: return self + if self.inside_anon_field is not None: + return getattr(self.inside_anon_field.__get__(obj), self.name) if self.is_bitfield: # bitfield member, use direct access return obj._buffer.__getattr__(self.name) @@ -105,6 +110,9 @@ return fieldtype._CData_output(suba, obj, offset) def __set__(self, obj, value): + if self.inside_anon_field is not None: + setattr(self.inside_anon_field.__get__(obj), self.name, value) + return fieldtype = self.ctype cobj = fieldtype.from_param(value) key = keepalive_key(self.num) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -46,7 +46,6 @@ except 
detect_cpu.ProcessorAutodetectError: pass - translation_modules = default_modules.copy() translation_modules.update([ "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", "_md5", diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -18,6 +18,7 @@ - ``bytebuffer(length)``: return a new read-write buffer of the given length. It works like a simplified array of characters (actually, depending on the configuration the ``array`` module internally uses this). + - ``attach_gdb()``: start a GDB at the interpreter-level (or a PDB before translation). Transparent Proxy Functionality @@ -37,4 +38,3 @@ -------------------------------------------------------- - ``isfake(obj)``: returns True if ``obj`` is faked. - - ``interp_pdb()``: start a pdb at interpreter-level. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -23,3 +23,32 @@ Implement yet another strange numpy indexing compatibility; indexing by a scalar returns a scalar + +.. branch: fix_transpose_for_list_v3 + +Allow arguments to transpose to be sequences + +.. branch: jit-leaner-frontend + +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation +of traces + +.. branch: win32-lib-name + +.. branch: remove-frame-forcing-in-executioncontext + +.. branch: rposix-for-3 + +Wrap more POSIX functions in `rpython.rlib.rposix`. + +.. branch: cleanup-history-rewriting + +A local clean-up in the JIT front-end. + +.. branch: jit-constptr-2 + +Remove the forced minor collection that occurs when rewriting the +assembler at the start of the JIT backend. This is done by emitting +the ConstPtrs in a separate table, and loading from the table. It +gives improved warm-up time and memory usage, and also removes +annoying special-purpose code for pinned pointers. 
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -240,8 +240,9 @@ "when --shared is on (it is by default). " "See issue #1971.") if sys.platform == 'win32': - config.translation.libname = '..\\..\\libs\\python27.lib' - thisdir.join('..', '..', 'libs').ensure(dir=1) + libdir = thisdir.join('..', '..', 'libs') + libdir.ensure(dir=1) + config.translation.libname = str(libdir.join('python27.lib')) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -291,13 +291,7 @@ return tb def set_traceback(self, traceback): - """Set the current traceback. It should either be a traceback - pointing to some already-escaped frame, or a traceback for the - current frame. To support the latter case we do not mark the - frame as escaped. The idea is that it will be marked as escaping - only if the exception really propagates out of this frame, by - executioncontext.leave() being called with got_exception=True. 
- """ + """Set the current traceback.""" self._application_traceback = traceback diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -90,6 +90,7 @@ 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', 'decode_long' : 'interp_magic.decode_long', + '_promote' : 'interp_magic._promote', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -168,3 +168,23 @@ except InvalidEndiannessError: raise oefmt(space.w_ValueError, "invalid byteorder argument") return space.newlong_from_rbigint(result) + +def _promote(space, w_obj): + """ Promote the first argument of the function and return it. Promote is by + value for ints, floats, strs, unicodes (but not subclasses thereof) and by + reference otherwise. (Unicodes not supported right now.) 
+ + This function is experimental!""" + from rpython.rlib import jit + if space.is_w(space.type(w_obj), space.w_int): + jit.promote(space.int_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_float): + jit.promote(space.float_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_str): + jit.promote_string(space.str_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_unicode): + raise OperationError(space.w_TypeError, space.wrap( + "promoting unicode unsupported")) + else: + jit.promote(w_obj) + return w_obj diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -47,3 +47,16 @@ assert decode_long('\x00\x80', 'little', False) == 32768 assert decode_long('\x00\x80', 'little', True) == -32768 raises(ValueError, decode_long, '', 'foo') + + def test_promote(self): + from __pypy__ import _promote + assert _promote(1) == 1 + assert _promote(1.1) == 1.1 + assert _promote("abc") == "abc" + raises(TypeError, _promote, u"abc") + l = [] + assert _promote(l) is l + class A(object): + pass + a = A() + assert _promote(a) is a diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -124,7 +124,7 @@ s = rffi.charp2str(ptr) else: s = rffi.charp2strn(ptr, length) - return space.wrap(s) + return space.wrapbytes(s) # # pointer to a wchar_t: builds and returns a unicode if self.is_unichar_ptr_or_array(): @@ -372,15 +372,15 @@ rffi_fclose(self.llf) -def prepare_file_argument(space, fileobj): - fileobj.direct_flush() - if fileobj.cffi_fileobj is None: - fd = fileobj.direct_fileno() +def prepare_file_argument(space, w_fileobj): + w_fileobj.direct_flush() + if w_fileobj.cffi_fileobj is None: + fd = w_fileobj.direct_fileno() if fd < 0: raise OperationError(space.w_ValueError, space.wrap("file has no OS file descriptor")) try: - 
fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode) + w_fileobj.cffi_fileobj = CffiFileObj(fd, w_fileobj.mode) except OSError, e: raise wrap_oserror(space, e) - return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf) + return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf) diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -285,6 +285,8 @@ from posix import openpty, fdopen, write, close except ImportError: skip('no openpty on this platform') + if 'gnukfreebsd' in sys.platform: + skip('close() hangs forever on kFreeBSD') read_fd, write_fd = openpty() write(write_fd, 'Abc\n') close(write_fd) diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h @@ -10,6 +10,7 @@ #define _CJKCODECS_H_ #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" /* a unicode "undefined" codepoint */ diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h new file mode 100644 --- /dev/null +++ b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h @@ -0,0 +1,9 @@ + +/* this is only included from the .c files in this directory: rename + these pypymbc-prefixed names to locally define the CPython names */ +typedef pypymbc_ssize_t Py_ssize_t; +#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +#define Py_UNICODE_SIZE pypymbc_UNICODE_SIZE +typedef pypymbc_wchar_t Py_UNICODE; +typedef pypymbc_ucs4_t ucs4_t; +typedef pypymbc_ucs2_t ucs2_t, DBCHAR; diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c +++ 
b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c @@ -1,6 +1,7 @@ #include #include #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec) diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h @@ -9,31 +9,28 @@ #include #ifdef _WIN64 -typedef __int64 ssize_t +typedef __int64 pypymbc_ssize_t #elif defined(_WIN32) -typedef int ssize_t; +typedef int pypymbc_ssize_t; #else #include -#endif - -#ifndef Py_UNICODE_SIZE -#ifdef _WIN32 -#define Py_UNICODE_SIZE 2 -#else -#define Py_UNICODE_SIZE 4 -#endif -typedef wchar_t Py_UNICODE; -typedef ssize_t Py_ssize_t; -#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +typedef ssize_t pypymbc_ssize_t; #endif #ifdef _WIN32 -typedef unsigned int ucs4_t; -typedef unsigned short ucs2_t, DBCHAR; +#define pypymbc_UNICODE_SIZE 2 +#else +#define pypymbc_UNICODE_SIZE 4 +#endif +typedef wchar_t pypymbc_wchar_t; + +#ifdef _WIN32 +typedef unsigned int pypymbc_ucs4_t; +typedef unsigned short pypymbc_ucs2_t; #else #include -typedef uint32_t ucs4_t; -typedef uint16_t ucs2_t, DBCHAR; +typedef uint32_t pypymbc_ucs4_t; +typedef uint16_t pypymbc_ucs2_t; #endif @@ -42,28 +39,28 @@ void *p; int i; unsigned char c[8]; - ucs2_t u2[4]; - ucs4_t u4[2]; + pypymbc_ucs2_t u2[4]; + pypymbc_ucs4_t u4[2]; } MultibyteCodec_State; typedef int (*mbcodec_init)(const void *config); -typedef Py_ssize_t (*mbencode_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencode_func)(MultibyteCodec_State *state, const void *config, - const Py_UNICODE **inbuf, Py_ssize_t inleft, - unsigned char **outbuf, Py_ssize_t outleft, + const pypymbc_wchar_t **inbuf, pypymbc_ssize_t inleft, + unsigned char **outbuf, pypymbc_ssize_t outleft, int flags); typedef 
int (*mbencodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, const void *config, - unsigned char **outbuf, Py_ssize_t outleft); -typedef Py_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, + unsigned char **outbuf, pypymbc_ssize_t outleft); +typedef pypymbc_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, const void *config, - const unsigned char **inbuf, Py_ssize_t inleft, - Py_UNICODE **outbuf, Py_ssize_t outleft); + const unsigned char **inbuf, pypymbc_ssize_t inleft, + pypymbc_wchar_t **outbuf, pypymbc_ssize_t outleft); typedef int (*mbdecodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, const void *config); typedef struct MultibyteCodec_s { @@ -94,59 +91,59 @@ const MultibyteCodec *codec; MultibyteCodec_State state; const unsigned char *inbuf_start, *inbuf, *inbuf_end; - Py_UNICODE *outbuf_start, *outbuf, *outbuf_end; + pypymbc_wchar_t *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, - char *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, + char *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_dec_free(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_UNICODE *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); +pypymbc_wchar_t *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t 
pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, - Py_UNICODE *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, + pypymbc_wchar_t *, pypymbc_ssize_t, pypymbc_ssize_t); struct pypy_cjk_enc_s { const MultibyteCodec *codec; MultibyteCodec_State state; - const Py_UNICODE *inbuf_start, *inbuf, *inbuf_end; + const pypymbc_wchar_t *inbuf_start, *inbuf, *inbuf_end; unsigned char *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_enc_s *pypy_cjk_enc_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, - Py_UNICODE *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, + pypymbc_wchar_t *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_enc_free(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, pypymbc_ssize_t); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); RPY_EXTERN char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, - 
char *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, + char *, pypymbc_ssize_t, pypymbc_ssize_t); RPY_EXTERN const MultibyteCodec *pypy_cjk_enc_getcodec(struct pypy_cjk_enc_s *); @@ -191,5 +188,7 @@ DEFINE_CODEC(big5) DEFINE_CODEC(cp950) +#undef DEFINE_CODEC + #endif diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -14,7 +14,7 @@ tmpfile2 = open(self.tmpfilename2, 'wb') tmpfileno2 = tmpfile2.fileno() - import struct, sys + import struct, sys, gc WORD = struct.calcsize('l') @@ -46,6 +46,8 @@ return count import _vmprof + gc.collect() # try to make the weakref list deterministic + gc.collect() # by freeing all dead code objects _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() @@ -57,6 +59,8 @@ pass """ in d + gc.collect() + gc.collect() _vmprof.enable(tmpfileno2, 0.01) exec """def foo2(): diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, @@ -134,8 +134,14 @@ if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: pass # typecheck returned "ok" without forcing 'ref' at all elif not PyString_Check(space, ref): # otherwise, use the alternate way - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsString only support strings")) + from pypy.module.cpyext.unicodeobject import ( + PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) + if PyUnicode_Check(space, ref): + ref = _PyUnicode_AsDefaultEncodedString(space, ref, 
lltype.nullptr(rffi.CCHARP.TO)) + else: + raise oefmt(space.w_TypeError, + "expected string or Unicode object, %T found", + from_ref(space, ref)) ref_str = rffi.cast(PyStringObject, ref) if not ref_str.c_buffer: # copy string buffer @@ -147,8 +153,14 @@ @cpython_api([PyObject, rffi.CCHARPP, rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1) def PyString_AsStringAndSize(space, ref, buffer, length): if not PyString_Check(space, ref): - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsStringAndSize only support strings")) + from pypy.module.cpyext.unicodeobject import ( + PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) + if PyUnicode_Check(space, ref): + ref = _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO)) + else: + raise oefmt(space.w_TypeError, + "expected string or Unicode object, %T found", + from_ref(space, ref)) ref_str = rffi.cast(PyStringObject, ref) if not ref_str.c_buffer: # copy string buffer diff --git a/pypy/module/cpyext/include/unicodeobject.h b/pypy/module/cpyext/include/unicodeobject.h --- a/pypy/module/cpyext/include/unicodeobject.h +++ b/pypy/module/cpyext/include/unicodeobject.h @@ -20,8 +20,12 @@ typedef struct { PyObject_HEAD - Py_UNICODE *buffer; + Py_UNICODE *str; Py_ssize_t size; + long hash; /* Hash value; -1 if not set */ + PyObject *defenc; /* (Default) Encoded version as Python + string, or NULL; this is used for + implementing the buffer protocol */ } PyUnicodeObject; diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -139,6 +139,44 @@ ]) module.getstring() + def test_py_string_as_string_Unicode(self): + module = self.import_extension('foo', [ + ("getstring_unicode", "METH_NOARGS", + """ + Py_UNICODE chars[] = {'t', 'e', 's', 't'}; + PyObject* u1 = PyUnicode_FromUnicode(chars, 4); + char *buf; + buf = PyString_AsString(u1); + if 
(buf == NULL) + return NULL; + if (buf[3] != 't') { + PyErr_SetString(PyExc_AssertionError, "Bad conversion"); + return NULL; + } + Py_DECREF(u1); + Py_INCREF(Py_None); + return Py_None; + """), + ("getstringandsize_unicode", "METH_NOARGS", + """ + Py_UNICODE chars[] = {'t', 'e', 's', 't'}; + PyObject* u1 = PyUnicode_FromUnicode(chars, 4); + char *buf; + Py_ssize_t len; + if (PyString_AsStringAndSize(u1, &buf, &len) < 0) + return NULL; + if (len != 4) { + PyErr_SetString(PyExc_AssertionError, "Bad Length"); + return NULL; + } + Py_DECREF(u1); + Py_INCREF(Py_None); + return Py_None; + """), + ]) + module.getstring_unicode() + module.getstringandsize_unicode() + def test_format_v(self): module = self.import_extension('foo', [ ("test_string_format_v", "METH_VARARGS", diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -90,8 +90,10 @@ self.raises(space, api, IndexError, api.PySequence_SetItem, l, 3, w_value) + t = api.PyTuple_New(1) + api.PyTuple_SetItem(t, 0, l) self.raises(space, api, TypeError, api.PySequence_SetItem, - api.PyTuple_New(1), 0, w_value) + t, 0, w_value) self.raises(space, api, TypeError, api.PySequence_SetItem, space.newdict(), 0, w_value) diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -5,6 +5,7 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import FatalError class TestTupleObject(BaseApiTest): @@ -18,29 +19,44 @@ #assert api.PyTuple_GET_SIZE(atuple) == 3 --- now a C macro raises(TypeError, api.PyTuple_Size(space.newlist([]))) api.PyErr_Clear() - + + def test_tuple_realize_refuses_nulls(self, 
space, api): + py_tuple = api.PyTuple_New(1) + py.test.raises(FatalError, from_ref, space, py_tuple) + def test_tuple_resize(self, space, api): w_42 = space.wrap(42) + w_43 = space.wrap(43) + w_44 = space.wrap(44) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') py_tuple = api.PyTuple_New(3) # inside py_tuple is an array of "PyObject *" items which each hold # a reference rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) ar[0] = py_tuple api._PyTuple_Resize(ar, 2) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 2 assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + assert space.int_w(space.getitem(w_tuple, space.wrap(1))) == 43 api.Py_DecRef(ar[0]) py_tuple = api.PyTuple_New(3) rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[2] = make_ref(space, w_44) ar[0] = py_tuple api._PyTuple_Resize(ar, 10) + assert api.PyTuple_Size(ar[0]) == 10 + for i in range(3, 10): + rffi.cast(PyTupleObject, py_tuple).c_ob_item[i] = make_ref( + space, space.wrap(42 + i)) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 10 - assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + for i in range(10): + assert space.int_w(space.getitem(w_tuple, space.wrap(i))) == 42 + i api.Py_DecRef(ar[0]) lltype.free(ar, flavor='raw') diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -24,7 +24,7 @@ if(PyUnicode_GetSize(s) == 11) { result = 1; } - if(s->ob_type->tp_basicsize != sizeof(void*)*5) + if(s->ob_type->tp_basicsize != sizeof(void*)*7) result = 0; Py_DECREF(s); return PyBool_FromLong(result); @@ -66,6 +66,7 @@ c = 
PyUnicode_AsUnicode(s); c[0] = 'a'; c[1] = 0xe9; + c[2] = 0x00; c[3] = 'c'; return s; """), @@ -74,7 +75,35 @@ assert len(s) == 4 assert s == u'a�\x00c' + def test_hash(self): + module = self.import_extension('foo', [ + ("test_hash", "METH_VARARGS", + ''' + PyObject* obj = (PyTuple_GetItem(args, 0)); + long hash = ((PyUnicodeObject*)obj)->hash; + return PyLong_FromLong(hash); + ''' + ), + ]) + res = module.test_hash(u"xyz") + assert res == hash(u'xyz') + def test_default_encoded_string(self): + module = self.import_extension('foo', [ + ("test_default_encoded_string", "METH_O", + ''' + PyObject* result = _PyUnicode_AsDefaultEncodedString(args, "replace"); + Py_INCREF(result); + return result; + ''' + ), + ]) + res = module.test_default_encoded_string(u"xyz") + assert isinstance(res, str) + assert res == 'xyz' + res = module.test_default_encoded_string(u"caf\xe9") + assert isinstance(res, str) + assert res == 'caf?' class TestUnicode(BaseApiTest): def test_unicodeobject(self, space, api): @@ -155,22 +184,22 @@ def test_unicode_resize(self, space, api): py_uni = new_empty_unicode(space, 10) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - py_uni.c_buffer[0] = u'a' - py_uni.c_buffer[1] = u'b' - py_uni.c_buffer[2] = u'c' + py_uni.c_str[0] = u'a' + py_uni.c_str[1] = u'b' + py_uni.c_str[2] = u'c' ar[0] = rffi.cast(PyObject, py_uni) api.PyUnicode_Resize(ar, 3) py_uni = rffi.cast(PyUnicodeObject, ar[0]) assert py_uni.c_size == 3 - assert py_uni.c_buffer[1] == u'b' - assert py_uni.c_buffer[3] == u'\x00' + assert py_uni.c_str[1] == u'b' + assert py_uni.c_str[3] == u'\x00' # the same for growing ar[0] = rffi.cast(PyObject, py_uni) api.PyUnicode_Resize(ar, 10) py_uni = rffi.cast(PyUnicodeObject, ar[0]) assert py_uni.c_size == 10 - assert py_uni.c_buffer[1] == 'b' - assert py_uni.c_buffer[10] == '\x00' + assert py_uni.c_str[1] == 'b' + assert py_uni.c_str[10] == '\x00' Py_DecRef(space, ar[0]) lltype.free(ar, flavor='raw') diff --git a/pypy/module/cpyext/tupleobject.py 
b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import fatalerror_notb from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL, build_type_checkers, PyObjectFields, cpython_struct, bootstrap_function) @@ -91,14 +92,22 @@ def tuple_realize(space, py_obj): """ Creates the tuple in the interpreter. The PyTupleObject must not - be modified after this call. + be modified after this call. We check that it does not contain + any NULLs at this point (which would correspond to half-broken + W_TupleObjects). """ py_tup = rffi.cast(PyTupleObject, py_obj) l = py_tup.c_ob_size p = py_tup.c_ob_item items_w = [None] * l for i in range(l): - items_w[i] = from_ref(space, p[i]) + w_item = from_ref(space, p[i]) + if w_item is None: + fatalerror_notb( + "Fatal error in cpyext, CPython compatibility layer: " + "converting a PyTupleObject into a W_TupleObject, " + "but found NULLs as items") + items_w[i] = w_item w_obj = space.newtuple(items_w) track_reference(space, py_obj, w_obj) return w_obj diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -22,7 +22,8 @@ PyUnicodeObjectStruct = lltype.ForwardReference() PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct) PyUnicodeObjectFields = (PyObjectFields + - (("buffer", rffi.CWCHARP), ("size", Py_ssize_t))) + (("str", rffi.CWCHARP), ("size", Py_ssize_t), + ("hash", rffi.LONG), ("defenc", PyObject))) cpython_struct("PyUnicodeObject", PyUnicodeObjectFields, PyUnicodeObjectStruct) @bootstrap_function @@ -54,16 +55,20 @@ buflen = length + 1 py_uni.c_size = length - py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen, - flavor='raw', zero=True, - add_memory_pressure=True) + py_uni.c_str = 
lltype.malloc(rffi.CWCHARP.TO, buflen, + flavor='raw', zero=True, + add_memory_pressure=True) + py_uni.c_hash = -1 + py_uni.c_defenc = lltype.nullptr(PyObject.TO) return py_uni def unicode_attach(space, py_obj, w_obj): "Fills a newly allocated PyUnicodeObject with a unicode string" py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_size = len(space.unicode_w(w_obj)) - py_unicode.c_buffer = lltype.nullptr(rffi.CWCHARP.TO) + py_unicode.c_str = lltype.nullptr(rffi.CWCHARP.TO) + py_unicode.c_hash = space.hash_w(w_obj) + py_unicode.c_defenc = lltype.nullptr(PyObject.TO) def unicode_realize(space, py_obj): """ @@ -71,17 +76,20 @@ be modified after this call. """ py_uni = rffi.cast(PyUnicodeObject, py_obj) - s = rffi.wcharpsize2unicode(py_uni.c_buffer, py_uni.c_size) + s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_size) w_obj = space.wrap(s) + py_uni.c_hash = space.hash_w(w_obj) track_reference(space, py_obj, w_obj) return w_obj @cpython_api([PyObject], lltype.Void, header=None) def unicode_dealloc(space, py_obj): py_unicode = rffi.cast(PyUnicodeObject, py_obj) - if py_unicode.c_buffer: - lltype.free(py_unicode.c_buffer, flavor="raw") + if py_unicode.c_str: + lltype.free(py_unicode.c_str, flavor="raw") from pypy.module.cpyext.object import PyObject_dealloc + if py_unicode.c_defenc: + PyObject_dealloc(space, py_unicode.c_defenc) PyObject_dealloc(space, py_obj) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) @@ -205,12 +213,12 @@ """Return a pointer to the internal Py_UNICODE buffer of the object. 
ref has to be a PyUnicodeObject (not checked).""" ref_unicode = rffi.cast(PyUnicodeObject, ref) - if not ref_unicode.c_buffer: + if not ref_unicode.c_str: # Copy unicode buffer w_unicode = from_ref(space, ref) u = space.unicode_w(w_unicode) - ref_unicode.c_buffer = rffi.unicode2wcharp(u) - return ref_unicode.c_buffer + ref_unicode.c_str = rffi.unicode2wcharp(u) + return ref_unicode.c_str @cpython_api([PyObject], rffi.CWCHARP) def PyUnicode_AsUnicode(space, ref): @@ -241,7 +249,7 @@ string may or may not be 0-terminated. It is the responsibility of the caller to make sure that the wchar_t string is 0-terminated in case this is required by the application.""" - c_buffer = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref)) + c_str = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref)) c_size = ref.c_size # If possible, try to copy the 0-termination as well @@ -251,7 +259,7 @@ i = 0 while i < size: - buf[i] = c_buffer[i] + buf[i] = c_str[i] i += 1 if size > c_size: @@ -343,8 +351,15 @@ return PyUnicode_FromUnicode(space, wchar_p, length) @cpython_api([PyObject, CONST_STRING], PyObject) -def _PyUnicode_AsDefaultEncodedString(space, w_unicode, errors): - return PyUnicode_AsEncodedString(space, w_unicode, lltype.nullptr(rffi.CCHARP.TO), errors) +def _PyUnicode_AsDefaultEncodedString(space, ref, errors): + # Returns a borrowed reference. 
+ py_uni = rffi.cast(PyUnicodeObject, ref) + if not py_uni.c_defenc: + py_uni.c_defenc = make_ref( + space, PyUnicode_AsEncodedString( + space, ref, + lltype.nullptr(rffi.CCHARP.TO), errors)) + return py_uni.c_defenc @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_Decode(space, s, size, encoding, errors): @@ -444,7 +459,7 @@ def PyUnicode_Resize(space, ref, newsize): # XXX always create a new string so far py_uni = rffi.cast(PyUnicodeObject, ref[0]) - if not py_uni.c_buffer: + if not py_uni.c_str: raise OperationError(space.w_SystemError, space.wrap( "PyUnicode_Resize called on already created string")) try: @@ -458,7 +473,7 @@ if oldsize < newsize: to_cp = oldsize for i in range(to_cp): - py_newuni.c_buffer[i] = py_uni.c_buffer[i] + py_newuni.c_str[i] = py_uni.c_str[i] Py_DecRef(space, ref[0]) ref[0] = rffi.cast(PyObject, py_newuni) return 0 diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -502,29 +502,34 @@ return W_NDimArray(self.implementation.transpose(self, axes)) def descr_transpose(self, space, args_w): - if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple): - args_w = space.fixedview(args_w[0]) - if (len(args_w) == 0 or - len(args_w) == 1 and space.is_none(args_w[0])): + if len(args_w) == 0 or len(args_w) == 1 and space.is_none(args_w[0]): return self.descr_get_transpose(space) else: - if len(args_w) != self.ndims(): - raise oefmt(space.w_ValueError, "axes don't match array") - axes = [] - axes_seen = [False] * self.ndims() - for w_arg in args_w: - try: - axis = support.index_w(space, w_arg) - except OperationError: - raise oefmt(space.w_TypeError, "an integer is required") - if axis < 0 or axis >= self.ndims(): - raise oefmt(space.w_ValueError, "invalid axis for this array") - if axes_seen[axis] is True: - raise oefmt(space.w_ValueError, "repeated axis in transpose") - 
axes.append(axis) - axes_seen[axis] = True - return self.descr_get_transpose(space, axes) + if len(args_w) > 1: + axes = args_w + else: # Iterable in the only argument (len(arg_w) == 1 and arg_w[0] is not None) + axes = space.fixedview(args_w[0]) + axes = self._checked_axes(axes, space) + return self.descr_get_transpose(space, axes) + + def _checked_axes(self, axes_raw, space): + if len(axes_raw) != self.ndims(): + raise oefmt(space.w_ValueError, "axes don't match array") + axes = [] + axes_seen = [False] * self.ndims() + for elem in axes_raw: + try: + axis = support.index_w(space, elem) + except OperationError: + raise oefmt(space.w_TypeError, "an integer is required") + if axis < 0 or axis >= self.ndims(): + raise oefmt(space.w_ValueError, "invalid axis for this array") + if axes_seen[axis] is True: + raise oefmt(space.w_ValueError, "repeated axis in transpose") + axes.append(axis) + axes_seen[axis] = True + return axes @unwrap_spec(axis1=int, axis2=int) def descr_swapaxes(self, space, axis1, axis2): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2960,6 +2960,36 @@ assert (a.transpose() == b).all() assert (a.transpose(None) == b).all() + def test_transpose_arg_tuple(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose((1, 2, 0)) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_list(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose([1, 2, 0]) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_array(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 
2, 0) + + transpose_test = a.transpose(np.array([1, 2, 0])) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + def test_transpose_error(self): import numpy as np a = np.arange(24).reshape(2, 3, 4) @@ -2968,6 +2998,11 @@ raises(ValueError, a.transpose, 1, 0, 1) raises(TypeError, a.transpose, 1, 0, '2') + def test_transpose_unexpected_argument(self): + import numpy as np + a = np.array([[1, 2], [3, 4], [5, 6]]) + raises(TypeError, 'a.transpose(axes=(1,2,0))') + def test_flatiter(self): from numpy import array, flatiter, arange, zeros a = array([[10, 30], [40, 60]]) diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -79,54 +79,45 @@ else: return _resolve_attr_chain(chain, obj, idx + 1) - -class _simple_attrgetter(object): - def __init__(self, attr): - self._attr = attr +class attrgetter(object): + def __init__(self, attr, *attrs): + if ( + not isinstance(attr, basestring) or + not all(isinstance(a, basestring) for a in attrs) + ): + def _raise_typeerror(obj): + raise TypeError( + "argument must be a string, not %r" % type(attr).__name__ + ) + self._call = _raise_typeerror + elif attrs: + self._multi_attrs = [ + a.split(".") for a in [attr] + list(attrs) + ] + self._call = self._multi_attrgetter + elif "." 
not in attr: + self._simple_attr = attr + self._call = self._simple_attrgetter + else: + self._single_attr = attr.split(".") + self._call = self._single_attrgetter def __call__(self, obj): - return getattr(obj, self._attr) + return self._call(obj) + def _simple_attrgetter(self, obj): + return getattr(obj, self._simple_attr) -class _single_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs + def _single_attrgetter(self, obj): + return _resolve_attr_chain(self._single_attr, obj) - def __call__(self, obj): - return _resolve_attr_chain(self._attrs, obj) - - -class _multi_attrgetter(object): - def __init__(self, attrs): - self._attrs = attrs - - def __call__(self, obj): + def _multi_attrgetter(self, obj): return tuple([ _resolve_attr_chain(attrs, obj) - for attrs in self._attrs + for attrs in self._multi_attrs ]) -def attrgetter(attr, *attrs): - if ( - not isinstance(attr, basestring) or - not all(isinstance(a, basestring) for a in attrs) - ): - def _raise_typeerror(obj): - raise TypeError( - "argument must be a string, not %r" % type(attr).__name__ - ) - return _raise_typeerror - if attrs: - return _multi_attrgetter([ - a.split(".") for a in [attr] + list(attrs) - ]) - elif "." 
not in attr: - return _simple_attrgetter(attr) - else: - return _single_attrgetter(attr.split(".")) - - class itemgetter(object): def __init__(self, item, *items): self._single = not bool(items) diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -47,7 +47,13 @@ a.name = "hello" a.child = A() a.child.name = "world" + a.child.foo = "bar" assert attrgetter("child.name")(a) == "world" + assert attrgetter("child.name", "child.foo")(a) == ("world", "bar") + + def test_attrgetter_type(self): + from operator import attrgetter + assert type(attrgetter("child.name")) is attrgetter def test_concat(self): class Seq1: diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py b/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py @@ -57,3 +57,32 @@ assert Y.y.offset == sizeof(c_int) * 2 assert Y._names_ == ['x', 'a', 'b', 'y'] + + def test_anonymous_fields_on_instance(self): + # this is about the *instance-level* access of anonymous fields, + # which you'd guess is the most common, but used not to work + # (issue #2230) + + class B(Structure): + _fields_ = [("x", c_int), ("y", c_int), ("z", c_int)] + class A(Structure): + _anonymous_ = ["b"] + _fields_ = [("b", B)] + + a = A() + a.x = 5 + assert a.x == 5 + assert a.b.x == 5 + a.b.x += 1 + assert a.x == 6 + + class C(Structure): + _anonymous_ = ["a"] + _fields_ = [("v", c_int), ("a", A)] + + c = C() + c.v = 3 + c.y = -8 + assert c.v == 3 + assert c.y == c.a.y == c.a.b.y == -8 + assert not hasattr(c, 'b') diff --git a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py b/pypy/module/test_lib_pypy/pyrepl/infrastructure.py --- a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py +++ b/pypy/module/test_lib_pypy/pyrepl/infrastructure.py @@ -18,6 +18,9 @@ # CONNECTION 
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import print_function +from contextlib import contextmanager +import os + from pyrepl.reader import Reader from pyrepl.console import Console, Event @@ -71,3 +74,14 @@ con = TestConsole(test_spec, verbose=True) reader = reader_class(con) reader.readline() + + + at contextmanager +def sane_term(): + """Ensure a TERM that supports clear""" + old_term, os.environ['TERM'] = os.environ.get('TERM'), 'xterm' + yield + if old_term is not None: + os.environ['TERM'] = old_term + else: + del os.environ['TERM'] diff --git a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py --- a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py @@ -18,7 +18,7 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from pyrepl.historical_reader import HistoricalReader -from .infrastructure import EA, BaseTestReader, read_spec +from .infrastructure import EA, BaseTestReader, sane_term, read_spec # this test case should contain as-verbatim-as-possible versions of # (applicable) bug reports @@ -46,7 +46,8 @@ read_spec(spec, HistoricalTestReader) - at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") + at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") def test_signal_failure(monkeypatch): import os import pty @@ -61,13 +62,14 @@ mfd, sfd = pty.openpty() try: - c = UnixConsole(sfd, sfd) - c.prepare() - c.restore() - monkeypatch.setattr(signal, 'signal', failing_signal) - c.prepare() - monkeypatch.setattr(signal, 'signal', really_failing_signal) - c.restore() + with sane_term(): + c = UnixConsole(sfd, sfd) + c.prepare() + c.restore() + monkeypatch.setattr(signal, 'signal', failing_signal) + c.prepare() + monkeypatch.setattr(signal, 'signal', really_failing_signal) + c.restore() finally: os.close(mfd) os.close(sfd) diff --git 
a/pypy/module/test_lib_pypy/pyrepl/test_readline.py b/pypy/module/test_lib_pypy/pyrepl/test_readline.py --- a/pypy/module/test_lib_pypy/pyrepl/test_readline.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_readline.py @@ -1,7 +1,10 @@ import pytest +from .infrastructure import sane_term - at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") + + at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") def test_raw_input(): import os import pty @@ -11,7 +14,8 @@ readline_wrapper = _ReadlineWrapper(slave, slave) os.write(master, b'input\n') - result = readline_wrapper.get_reader().readline() + with sane_term(): + result = readline_wrapper.get_reader().readline() #result = readline_wrapper.raw_input('prompt:') assert result == 'input' # A bytes string on python2, a unicode string on python3. diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py --- a/pypy/objspace/std/objectobject.py +++ b/pypy/objspace/std/objectobject.py @@ -110,7 +110,7 @@ def descr__init__(space, w_obj, __args__): # don't allow arguments unless __new__ is overridden w_type = space.type(w_obj) - w_parent_new, _ = w_type.lookup_where('__new__') + w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__') if w_parent_new is space.w_object: try: __args__.fixedunpack(0) diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -288,9 +288,11 @@ RPyListPrinter.recursive = True try: itemlist = [] - for i in range(length): + for i in range(min(length, MAX_DISPLAY_LENGTH)): item = items[i] itemlist.append(str(item)) # may recurse here + if length > MAX_DISPLAY_LENGTH: + itemlist.append("...") str_items = ', '.join(itemlist) finally: RPyListPrinter.recursive = False diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ # hypothesis is used for test generation on untranslated jit tests 
hypothesis -enum>=0.4.6 # is a dependency, but old pip does not pick it up enum34>=1.1.2 diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -126,6 +126,9 @@ ChoiceOption("jit_profiler", "integrate profiler support into the JIT", ["off", "oprofile"], default="off"), + ChoiceOption("jit_opencoder_model", "the model limits the maximal length" + " of traces. Use big if you want to go bigger than " + "the default", ["big", "normal"], default="normal"), BoolOption("check_str_without_nul", "Forbid NUL chars in strings in some external function calls", default=False, cmdline=None), diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -77,6 +77,7 @@ for c in s: buf.append(c) buf.append(' ') +rpython_print_item._annenforceargs_ = (str,) def rpython_print_newline(): buf = stdoutbuffer.linebuf diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -14,7 +14,7 @@ CoreRegisterManager, check_imm_arg, VFPRegisterManager, operations as regalloc_operations) from rpython.jit.backend.llsupport import jitframe, rewrite -from rpython.jit.backend.llsupport.assembler import DEBUG_COUNTER, debug_bridge, BaseAssembler +from rpython.jit.backend.llsupport.assembler import DEBUG_COUNTER, BaseAssembler from rpython.jit.backend.llsupport.regalloc import get_scale, valid_addressing_size from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from rpython.jit.backend.model import CompiledLoopToken @@ -481,8 +481,9 @@ def generate_quick_failure(self, guardtok): startpos = self.mc.currpos() - fail_descr, target = self.store_info_on_descr(startpos, guardtok) - self.regalloc_push(imm(fail_descr)) + faildescrindex, target = 
self.store_info_on_descr(startpos, guardtok) + self.load_from_gc_table(r.ip.value, faildescrindex) + self.regalloc_push(r.ip) self.push_gcmap(self.mc, gcmap=guardtok.gcmap, push=True) self.mc.BL(target) return startpos @@ -556,7 +557,7 @@ debug_stop('jit-backend-ops') def _call_header(self): - assert self.mc.currpos() == 0 + # there is the gc table before this point self.gen_func_prolog() def _call_header_with_stack_check(self): @@ -596,20 +597,22 @@ frame_info = self.datablockwrapper.malloc_aligned( jitframe.JITFRAMEINFO_SIZE, alignment=WORD) clt.frame_info = rffi.cast(jitframe.JITFRAMEINFOPTR, frame_info) - clt.allgcrefs = [] clt.frame_info.clear() # for now if log: operations = self._inject_debugging_code(looptoken, operations, 'e', looptoken.number) + regalloc = Regalloc(assembler=self) + allgcrefs = [] + operations = regalloc.prepare_loop(inputargs, operations, looptoken, + allgcrefs) + self.reserve_gcref_table(allgcrefs) + functionpos = self.mc.get_relative_pos() + self._call_header_with_stack_check() self._check_frame_depth_debug(self.mc) - regalloc = Regalloc(assembler=self) - operations = regalloc.prepare_loop(inputargs, operations, looptoken, - clt.allgcrefs) - loop_head = self.mc.get_relative_pos() looptoken._ll_loop_code = loop_head # @@ -620,9 +623,11 @@ self.write_pending_failure_recoveries() + full_size = self.mc.get_relative_pos() rawstart = self.materialize_loop(looptoken) - looptoken._function_addr = looptoken._ll_function_addr = rawstart + looptoken._ll_function_addr = rawstart + functionpos + self.patch_gcref_table(looptoken, rawstart) self.process_pending_guards(rawstart) self.fixup_target_tokens(rawstart) @@ -641,7 +646,13 @@ looptoken.number, loopname, r_uint(rawstart + loop_head), r_uint(rawstart + size_excluding_failure_stuff), - r_uint(rawstart))) + r_uint(rawstart + functionpos))) + debug_print(" gc table: 0x%x" % r_uint(rawstart)) + debug_print(" function: 0x%x" % r_uint(rawstart + functionpos)) + debug_print(" resops: 0x%x" % 
r_uint(rawstart + loop_head)) + debug_print(" failures: 0x%x" % r_uint(rawstart + + size_excluding_failure_stuff)) + debug_print(" end: 0x%x" % r_uint(rawstart + full_size)) debug_stop("jit-backend-addr") return AsmInfo(ops_offset, rawstart + loop_head, @@ -678,27 +689,43 @@ arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = Regalloc(assembler=self) - startpos = self.mc.get_relative_pos() + allgcrefs = [] operations = regalloc.prepare_bridge(inputargs, arglocs, operations, - self.current_clt.allgcrefs, + allgcrefs, self.current_clt.frame_info) + self.reserve_gcref_table(allgcrefs) + startpos = self.mc.get_relative_pos() self._check_frame_depth(self.mc, regalloc.get_gcmap()) + bridgestartpos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() self.write_pending_failure_recoveries() + fullsize = self.mc.get_relative_pos() rawstart = self.materialize_loop(original_loop_token) + self.patch_gcref_table(original_loop_token, rawstart) self.process_pending_guards(rawstart) + debug_start("jit-backend-addr") + debug_print("bridge out of Guard 0x%x has address 0x%x to 0x%x" % + (r_uint(descr_number), r_uint(rawstart + startpos), + r_uint(rawstart + codeendpos))) + debug_print(" gc table: 0x%x" % r_uint(rawstart)) + debug_print(" jump target: 0x%x" % r_uint(rawstart + startpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + bridgestartpos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + codeendpos)) + debug_print(" end: 0x%x" % r_uint(rawstart + fullsize)) + debug_stop("jit-backend-addr") + # patch the jump from original guard self.patch_trace(faildescr, original_loop_token, - rawstart, regalloc) + rawstart + startpos, regalloc) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) @@ -716,9 +743,53 @@ ops_offset=ops_offset) self.teardown() - debug_bridge(descr_number, rawstart, codeendpos) + return AsmInfo(ops_offset, 
startpos + rawstart, codeendpos - startpos) - return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) + def reserve_gcref_table(self, allgcrefs): + gcref_table_size = len(allgcrefs) * WORD + # align to a multiple of 16 and reserve space at the beginning + # of the machine code for the gc table. This lets us write + # machine code with relative addressing (see load_from_gc_table()) + gcref_table_size = (gcref_table_size + 15) & ~15 + mc = self.mc + assert mc.get_relative_pos() == 0 + for i in range(gcref_table_size): + mc.writechar('\x00') + self.setup_gcrefs_list(allgcrefs) + + def patch_gcref_table(self, looptoken, rawstart): + # the gc table is at the start of the machine code. Fill it now + tracer = self.cpu.gc_ll_descr.make_gcref_tracer(rawstart, + self._allgcrefs) + gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) + gcreftracers.append(tracer) # keepalive + self.teardown_gcrefs_list() + + def load_from_gc_table(self, regnum, index): + """emits either: + LDR Rt, [PC, #offset] if -4095 <= offset + or: + gen_load_int(Rt, offset) + LDR Rt, [PC, Rt] for larger offsets + """ + mc = self.mc + address_in_buffer = index * WORD # at the start of the buffer + offset = address_in_buffer - (mc.get_relative_pos() + 8) # negative + if offset >= -4095: + mc.LDR_ri(regnum, r.pc.value, offset) + else: + # The offset we're loading is negative: right now, + # gen_load_int() will always use exactly + # get_max_size_of_gen_load_int() instructions. No point + # in optimizing in case we get less. Just in case though, + # we check and pad with nops. 
+ extra_bytes = mc.get_max_size_of_gen_load_int() * 2 + offset -= extra_bytes + start = mc.get_relative_pos() + mc.gen_load_int(regnum, offset) + while mc.get_relative_pos() != start + extra_bytes: + mc.NOP() + mc.LDR_rr(regnum, r.pc.value, regnum) def new_stack_loc(self, i, tp): base_ofs = self.cpu.get_baseofs_of_frame_field() @@ -929,6 +1000,12 @@ clt.asmmemmgr_blocks = [] return clt.asmmemmgr_blocks + def get_asmmemmgr_gcreftracers(self, looptoken): + clt = looptoken.compiled_loop_token + if clt.asmmemmgr_gcreftracers is None: + clt.asmmemmgr_gcreftracers = [] + return clt.asmmemmgr_gcreftracers + def _walk_operations(self, inputargs, operations, regalloc): fcond = c.AL self._regalloc = regalloc @@ -939,9 +1016,9 @@ op = operations[i] self.mc.mark_op(op) opnum = op.getopnum() - if op.has_no_side_effect() and op not in regalloc.longevity: + if rop.has_no_side_effect(opnum) and op not in regalloc.longevity: regalloc.possibly_free_vars_for_op(op) - elif not we_are_translated() and op.getopnum() == -127: + elif not we_are_translated() and op.getopnum() == rop.FORCE_SPILL: regalloc.prepare_force_spill(op, fcond) else: arglocs = regalloc_operations[opnum](regalloc, op, fcond) @@ -949,7 +1026,7 @@ fcond = asm_operations[opnum](self, op, arglocs, regalloc, fcond) assert fcond is not None - if op.is_guard(): + if rop.is_guard(opnum): regalloc.possibly_free_vars(op.getfailargs()) if op.type != 'v': regalloc.possibly_free_var(op) diff --git a/rpython/jit/backend/arm/detect.py b/rpython/jit/backend/arm/detect.py --- a/rpython/jit/backend/arm/detect.py +++ b/rpython/jit/backend/arm/detect.py @@ -63,3 +63,44 @@ "falling back to", "ARMv%d" % n) debug_stop("jit-backend-arch") return n + + +# Once we can rely on the availability of glibc >= 2.16, replace this with: +# from rpython.rtyper.lltypesystem import lltype, rffi +# getauxval = rffi.llexternal("getauxval", [lltype.Unsigned], lltype.Unsigned) +def getauxval(type_, filename='/proc/self/auxv'): + fd = os.open(filename, 
os.O_RDONLY, 0644) + + buf_size = 2048 + struct_size = 8 # 2x uint32 + try: + buf = os.read(fd, buf_size) + finally: + os.close(fd) + + # decode chunks of 8 bytes (a_type, a_val), and + # return the a_val whose a_type corresponds to type_, + # or zero if not found. + i = 0 + while i <= buf_size - struct_size: + # We only support little-endian ARM + a_type = (ord(buf[i]) | + (ord(buf[i+1]) << 8) | + (ord(buf[i+2]) << 16) | + (ord(buf[i+3]) << 24)) + a_val = (ord(buf[i+4]) | + (ord(buf[i+5]) << 8) | + (ord(buf[i+6]) << 16) | + (ord(buf[i+7]) << 24)) + i += struct_size + if a_type == type_: + return a_val + + return 0 + + +def detect_neon(): + AT_HWCAP = 16 + HWCAP_NEON = 1 << 12 + hwcap = getauxval(AT_HWCAP) + return bool(hwcap & HWCAP_NEON) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -35,9 +35,9 @@ class ArmGuardToken(GuardToken): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - offset, guard_opnum, frame_depth, fcond=c.AL): + offset, guard_opnum, frame_depth, faildescrindex, fcond=c.AL): GuardToken.__init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - guard_opnum, frame_depth) + guard_opnum, frame_depth, faildescrindex) self.fcond = fcond self.offset = offset @@ -178,6 +178,7 @@ assert isinstance(descr, AbstractFailDescr) gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + faildescrindex = self.get_gcref_from_faildescr(descr) token = ArmGuardToken(self.cpu, gcmap, descr, failargs=op.getfailargs(), @@ -185,6 +186,7 @@ offset=offset, guard_opnum=op.getopnum(), frame_depth=frame_depth, + faildescrindex=faildescrindex, fcond=fcond) return token @@ -398,14 +400,13 @@ def emit_op_finish(self, op, arglocs, regalloc, fcond): base_ofs = self.cpu.get_baseofs_of_frame_field() - if len(arglocs) == 2: - [return_val, fail_descr_loc] = arglocs + if len(arglocs) > 0: + [return_val] = arglocs 
self.store_reg(self.mc, return_val, r.fp, base_ofs) - else: - [fail_descr_loc] = arglocs ofs = self.cpu.get_ofs_of_frame_field('jf_descr') - self.mc.gen_load_int(r.ip.value, fail_descr_loc.value) + faildescrindex = self.get_gcref_from_faildescr(op.getdescr()) + self.load_from_gc_table(r.ip.value, faildescrindex) # XXX self.mov(fail_descr_loc, RawStackLoc(ofs)) self.store_reg(self.mc, r.ip, r.fp, ofs, helper=r.lr) if op.numargs() > 0 and op.getarg(0).type == REF: @@ -1035,9 +1036,9 @@ assert (guard_op.getopnum() == rop.GUARD_NOT_FORCED or guard_op.getopnum() == rop.GUARD_NOT_FORCED_2) faildescr = guard_op.getdescr() + faildescrindex = self.get_gcref_from_faildescr(faildescr) ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - value = rffi.cast(lltype.Signed, cast_instance_to_gcref(faildescr)) - self.mc.gen_load_int(r.ip.value, value) + self.load_from_gc_table(r.ip.value, faildescrindex) self.store_reg(self.mc, r.ip, r.fp, ofs) def _find_nearby_operation(self, delta): @@ -1092,8 +1093,8 @@ self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) return fcond - # the following five instructions are only ARMv7; - # regalloc.py won't call them at all on ARMv6 + # the following five instructions are only ARMv7 with NEON; + # regalloc.py won't call them at all, in other cases emit_opx_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') emit_opx_llong_sub = gen_emit_float_op('llong_sub', 'VSUB_i64') emit_opx_llong_and = gen_emit_float_op('llong_and', 'VAND_i64') @@ -1250,3 +1251,9 @@ self._load_from_mem(res_loc, res_loc, ofs_loc, imm(scale), signed, fcond) return fcond + + def emit_op_load_from_gc_table(self, op, arglocs, regalloc, fcond): + res_loc, = arglocs + index = op.getarg(0).getint() + self.load_from_gc_table(res_loc.value, index) + return fcond diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1,5 +1,4 @@ from 
rpython.rtyper.annlowlevel import cast_instance_to_gcref -from rpython.rlib import rgc from rpython.rlib.debug import debug_print, debug_start, debug_stop from rpython.jit.backend.llsupport.regalloc import FrameManager, \ RegisterManager, TempVar, compute_vars_longevity, BaseRegalloc, \ @@ -530,7 +529,7 @@ EffectInfo.OS_LLONG_AND, EffectInfo.OS_LLONG_OR, EffectInfo.OS_LLONG_XOR): - if self.cpu.cpuinfo.arch_version >= 7: + if self.cpu.cpuinfo.neon: args = self._prepare_llong_binop_xx(op, fcond) self.perform_extra(op, args, fcond) return @@ -627,16 +626,11 @@ def prepare_op_finish(self, op, fcond): # the frame is in fp, but we have to point where in the frame is # the potential argument to FINISH - descr = op.getdescr() - fail_descr = cast_instance_to_gcref(descr) - # we know it does not move, but well - rgc._make_sure_does_not_move(fail_descr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) - locs = [loc, imm(fail_descr)] + locs = [loc] else: - locs = [imm(fail_descr)] + locs = [] return locs def load_condition_into_cc(self, box): @@ -892,6 +886,10 @@ prepare_op_same_as_r = _prepare_op_same_as prepare_op_same_as_f = _prepare_op_same_as + def prepare_op_load_from_gc_table(self, op, fcond): + resloc = self.force_allocate_reg(op) + return [resloc] + def prepare_op_call_malloc_nursery(self, op, fcond): size_box = op.getarg(0) assert isinstance(size_box, ConstInt) diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -7,13 +7,14 @@ from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.jit.backend.arm.detect import detect_hardfloat -from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.backend.arm.detect import detect_arch_version, detect_neon jitframe.STATICSIZE = JITFRAME_FIXED_SIZE class 
CPUInfo(object): hf_abi = False arch_version = 6 + neon = False class AbstractARMCPU(AbstractLLCPU): @@ -48,6 +49,7 @@ def setup_once(self): self.cpuinfo.arch_version = detect_arch_version() self.cpuinfo.hf_abi = detect_hardfloat() + self.cpuinfo.neon = detect_neon() #self.codemap.setup() self.assembler.setup_once() diff --git a/rpython/jit/backend/arm/test/test_detect.py b/rpython/jit/backend/arm/test/test_detect.py --- a/rpython/jit/backend/arm/test/test_detect.py +++ b/rpython/jit/backend/arm/test/test_detect.py @@ -1,6 +1,6 @@ import py from rpython.tool.udir import udir -from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.backend.arm.detect import detect_arch_version, getauxval cpuinfo = "Processor : ARMv%d-compatible processor rev 7 (v6l)""" cpuinfo2 = """processor : 0 @@ -29,6 +29,19 @@ address sizes : 36 bits physical, 48 bits virtual power management: """ +# From a Marvell Armada 370/XP +auxv = ( + '\x10\x00\x00\x00\xd7\xa8\x1e\x00\x06\x00\x00\x00\x00\x10\x00\x00\x11\x00' + '\x00\x00d\x00\x00\x00\x03\x00\x00\x004\x00\x01\x00\x04\x00\x00\x00 \x00' + '\x00\x00\x05\x00\x00\x00\t\x00\x00\x00\x07\x00\x00\x00\x00\xe0\xf3\xb6' + '\x08\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00t\xcf\x04\x00\x0b\x00\x00' + '\x000\x0c\x00\x00\x0c\x00\x00\x000\x0c\x00\x00\r\x00\x00\x000\x0c\x00\x00' + '\x0e\x00\x00\x000\x0c\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x19\x00\x00' + '\x00\x8a\xf3\x87\xbe\x1a\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\xec' + '\xff\x87\xbe\x0f\x00\x00\x00\x9a\xf3\x87\xbe\x00\x00\x00\x00\x00\x00\x00' + '\x00' +) + def write_cpuinfo(info): filepath = udir.join('get_arch_version') @@ -46,3 +59,10 @@ py.test.raises(ValueError, 'detect_arch_version(write_cpuinfo(cpuinfo % 5))') assert detect_arch_version(write_cpuinfo(cpuinfo2)) == 6 + + +def test_getauxval_no_neon(): + path = udir.join('auxv') + path.write(auxv, 'wb') + AT_HWCAP = 16 + assert getauxval(AT_HWCAP, filename=str(path)) == 2009303 diff --git 
a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -146,7 +146,7 @@ MODEL_X86_64: ['floats', 'singlefloats'], MODEL_X86_64_SSE4: ['floats', 'singlefloats'], MODEL_ARM: ['floats', 'singlefloats', 'longlong'], - MODEL_PPC_64: [], # we don't even have PPC directory, so no + MODEL_PPC_64: ['floats'], MODEL_S390_64: ['floats'], }[backend_name] diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -455,7 +455,7 @@ if box is not frame.current_op: value = frame.env[box] else: - value = box.getvalue() # 0 or 0.0 or NULL + value = 0 # box.getvalue() # 0 or 0.0 or NULL else: value = None values.append(value) @@ -472,6 +472,13 @@ # ------------------------------------------------------------ + def setup_descrs(self): + all_descrs = [] + for k, v in self.descrs.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + return all_descrs + def calldescrof(self, FUNC, ARGS, RESULT, effect_info): key = ('call', getkind(RESULT), tuple([getkind(A) for A in ARGS]), diff --git a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py --- a/rpython/jit/backend/llsupport/asmmemmgr.py +++ b/rpython/jit/backend/llsupport/asmmemmgr.py @@ -216,9 +216,6 @@ gcroot_markers = None - frame_positions = None - frame_assignments = None - def __init__(self, translated=None): if translated is None: translated = we_are_translated() @@ -323,12 +320,6 @@ assert gcrootmap is not None for pos, mark in self.gcroot_markers: gcrootmap.register_asm_addr(rawstart + pos, mark) - if cpu.HAS_CODEMAP: - cpu.codemap.register_frame_depth_map(rawstart, rawstart + size, - self.frame_positions, - self.frame_assignments) - self.frame_positions = None - self.frame_assignments = None return rawstart def _become_a_plain_block_builder(self): diff --git 
a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -23,10 +23,11 @@ class GuardToken(object): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - guard_opnum, frame_depth): + guard_opnum, frame_depth, faildescrindex): assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr + self.faildescrindex = faildescrindex self.failargs = failargs self.fail_locs = fail_locs self.gcmap = self.compute_gcmap(gcmap, failargs, @@ -144,6 +145,22 @@ self.codemap_builder = CodemapBuilder() self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) + def setup_gcrefs_list(self, allgcrefs): + self._allgcrefs = allgcrefs + self._allgcrefs_faildescr_next = 0 + + def teardown_gcrefs_list(self): + self._allgcrefs = None + + def get_gcref_from_faildescr(self, descr): + """This assumes that it is called in order for all faildescrs.""" + search = cast_instance_to_gcref(descr) + while not _safe_eq( + self._allgcrefs[self._allgcrefs_faildescr_next], search): + self._allgcrefs_faildescr_next += 1 + assert self._allgcrefs_faildescr_next < len(self._allgcrefs) + return self._allgcrefs_faildescr_next + def set_debug(self, v): r = self._debug self._debug = v @@ -186,8 +203,7 @@ break exc = guardtok.must_save_exception() target = self.failure_recovery_code[exc + 2 * withfloats] - fail_descr = cast_instance_to_gcref(guardtok.faildescr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) + faildescrindex = guardtok.faildescrindex base_ofs = self.cpu.get_baseofs_of_frame_field() # # in practice, about 2/3rd of 'positions' lists that we build are @@ -229,7 +245,7 @@ self._previous_rd_locs = positions # write down the positions of locs guardtok.faildescr.rd_locs = positions - return fail_descr, target + return faildescrindex, target def enter_portal_frame(self, op): if self.cpu.HAS_CODEMAP: @@ -288,7 +304,7 @@ gcref = 
cast_instance_to_gcref(value) if gcref: - rgc._make_sure_does_not_move(gcref) + rgc._make_sure_does_not_move(gcref) # but should be prebuilt value = rffi.cast(lltype.Signed, gcref) je_location = self._call_assembler_check_descr(value, tmploc) # @@ -331,7 +347,7 @@ counter = self._register_counter(tp, number, token) c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) operations.append( - ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None)) + ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr])) def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive @@ -451,3 +467,8 @@ r_uint(rawstart + codeendpos))) debug_stop("jit-backend-addr") +def _safe_eq(x, y): + try: + return x == y + except AttributeError: # minor mess + return False diff --git a/rpython/jit/backend/llsupport/codemap.py b/rpython/jit/backend/llsupport/codemap.py --- a/rpython/jit/backend/llsupport/codemap.py +++ b/rpython/jit/backend/llsupport/codemap.py @@ -41,10 +41,6 @@ RPY_EXTERN long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, long *current_pos_addr); -RPY_EXTERN long pypy_jit_depthmap_add(unsigned long addr, unsigned int size, - unsigned int stackdepth); -RPY_EXTERN void pypy_jit_depthmap_clear(unsigned long addr, unsigned int size); - """], separate_module_sources=[ open(os.path.join(srcdir, 'skiplist.c'), 'r').read() + open(os.path.join(srcdir, 'codemap.c'), 'r').read() @@ -64,15 +60,6 @@ pypy_jit_codemap_firstkey = llexternal('pypy_jit_codemap_firstkey', [], lltype.Signed) -pypy_jit_depthmap_add = llexternal('pypy_jit_depthmap_add', - [lltype.Signed, lltype.Signed, - lltype.Signed], lltype.Signed) -pypy_jit_depthmap_clear = llexternal('pypy_jit_depthmap_clear', - [lltype.Signed, lltype.Signed], - lltype.Void) - -stack_depth_at_loc = llexternal('pypy_jit_stack_depth_at_loc', - [lltype.Signed], lltype.Signed) find_codemap_at_addr = llexternal('pypy_find_codemap_at_addr', [lltype.Signed, rffi.CArrayPtr(lltype.Signed)], 
llmemory.Address) @@ -102,20 +89,6 @@ items = pypy_jit_codemap_del(start, stop - start) if items: lltype.free(items, flavor='raw', track_allocation=False) - pypy_jit_depthmap_clear(start, stop - start) - From pypy.commits at gmail.com Sun Apr 10 14:45:32 2016 From: pypy.commits at gmail.com (stefanor) Date: Sun, 10 Apr 2016 11:45:32 -0700 (PDT) Subject: [pypy-commit] pypy default: Avoid hanging forever on amd64 Debian/kFreeBSD (where it appears this timeout doesn't work) Message-ID: <570a9f4c.2457c20a.61d0d.3293@mx.google.com> Author: Stefano Rivera Branch: Changeset: r83598:d35ab39f9e4a Date: 2016-04-10 11:44 -0700 http://bitbucket.org/pypy/pypy/changeset/d35ab39f9e4a/ Log: Avoid hanging forever on amd64 Debian/kFreeBSD (where it appears this timeout doesn't work) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -733,6 +733,7 @@ try: while 1: count += cli.send(b'foobar' * 70) + assert count < 100000 except timeout: pass t.recv(count) From pypy.commits at gmail.com Mon Apr 11 14:44:36 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 11 Apr 2016 11:44:36 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Create path_or_fd unwrapper Message-ID: <570bf094.6869c20a.44a69.0f0e@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83599:48b561b9e7f0 Date: 2016-04-11 17:01 +0100 http://bitbucket.org/pypy/pypy/changeset/48b561b9e7f0/ Log: Create path_or_fd unwrapper diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -112,6 +112,33 @@ return func(fname1, fname2, *args) return dispatch +class Path(object): + _immutable_fields_ = ['as_fd', 'as_bytes', 'as_unicode'] + + def __init__(self, fd, bytes, unicode): + self.as_fd = fd + self.as_bytes = bytes + self.as_unicode = unicode + +class 
path_or_fd(Unwrapper): + def unwrap(self, space, w_value): + if _WIN32: + try: + path_u = space.unicode_w(w_value) + return Path(-1, None, path_u) + except OperationError: + pass + try: + path_b = space.fsencode_w(w_value) + return Path(-1, path_b, None) + except OperationError: + pass + if not space.isinstance_w(w_value, space.w_int): + raise oefmt(space.w_TypeError, + "argument should be string, bytes or integer, not %T", w_value) + fd = unwrap_fd(space, w_value) + return Path(fd, None, None) + if hasattr(rposix, 'AT_FDCWD'): DEFAULT_DIR_FD = rposix.AT_FDCWD From pypy.commits at gmail.com Mon Apr 11 14:44:38 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 11 Apr 2016 11:44:38 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Share a bit more code between the different cases in interp_posix.utime() Message-ID: <570bf096.12871c0a.bfc1e.5596@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83600:488852d18575 Date: 2016-04-11 19:43 +0100 http://bitbucket.org/pypy/pypy/changeset/488852d18575/ Log: Share a bit more code between the different cases in interp_posix.utime() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1334,13 +1334,39 @@ not space.is_w(w_ns, space.w_None)): raise oefmt(space.w_ValueError, "utime: you may specify either 'times' or 'ns' but not both") + utime_now = False + if space.is_w(w_times, space.w_None) and space.is_w(w_ns, space.w_None): + atime_s = mtime_s = 0 + atime_ns = mtime_ns = 0 + utime_now = True + elif not space.is_w(w_times, space.w_None): + times_w = space.fixedview(w_times) + if len(times_w) != 2: + raise oefmt(space.w_TypeError, + "utime: 'times' must be either a tuple of two ints or None") + atime_s, atime_ns = convert_seconds(space, times_w[0]) + mtime_s, mtime_ns = convert_seconds(space, times_w[1]) + else: + args_w = space.fixedview(w_ns) + if len(args_w) != 2: + raise 
oefmt(space.w_TypeError, + "utime: 'ns' must be a tuple of two ints") + atime_s, atime_ns = convert_ns(space, args_w[0]) + mtime_s, mtime_ns = convert_ns(space, args_w[1]) if rposix.HAVE_UTIMENSAT: path = space.fsencode_w(w_path) try: - _utimensat(space, path, w_times, w_ns, dir_fd, follow_symlinks) + if utime_now: + rposix.utimensat( + path, 0, rposix.UTIME_NOW, 0, rposix.UTIME_NOW, + dir_fd=dir_fd, follow_symlinks=follow_symlinks) + else: + rposix.utimensat( + path, atime_s, atime_ns, mtime_s, mtime_ns, + dir_fd=dir_fd, follow_symlinks=follow_symlinks) return - except OSError, e: + except OSError as e: raise wrap_oserror2(space, e, w_path) if not follow_symlinks: @@ -1349,7 +1375,7 @@ if not space.is_w(w_ns, space.w_None): raise oefmt(space.w_NotImplementedError, "utime: 'ns' unsupported on this platform on PyPy") - if space.is_w(w_times, space.w_None): + if utime_now: try: dispatch_filename(rposix.utime, 1)(space, w_path, None) return @@ -1371,29 +1397,6 @@ raise OperationError(space.w_TypeError, space.wrap(msg)) -def _utimensat(space, path, w_times, w_ns, dir_fd, follow_symlinks): - if space.is_w(w_times, space.w_None) and space.is_w(w_ns, space.w_None): - atime_s = mtime_s = 0 - atime_ns = mtime_ns = rposix.UTIME_NOW - elif not space.is_w(w_times, space.w_None): - times_w = space.fixedview(w_times) - if len(times_w) != 2: - raise oefmt(space.w_TypeError, - "utime: 'times' must be either a tuple of two ints or None") - atime_s, atime_ns = convert_seconds(space, times_w[0]) - mtime_s, mtime_ns = convert_seconds(space, times_w[1]) - else: - args_w = space.fixedview(w_ns) - if len(args_w) != 2: - raise oefmt(space.w_TypeError, - "utime: 'ns' must be a tuple of two ints") - atime_s, atime_ns = convert_ns(space, args_w[0]) - mtime_s, mtime_ns = convert_ns(space, args_w[1]) - - rposix.utimensat( - path, atime_s, atime_ns, mtime_s, mtime_ns, - dir_fd=dir_fd, follow_symlinks=follow_symlinks) - def convert_seconds(space, w_time): if space.isinstance_w(w_time, 
space.w_float): time = space.float_w(w_time) From pypy.commits at gmail.com Mon Apr 11 17:56:32 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 11 Apr 2016 14:56:32 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-includes: remove outdated headers Message-ID: <570c1d90.e109c20a.35097.495d@mx.google.com> Author: mattip Branch: cleanup-includes Changeset: r83602:8d781f7a74f7 Date: 2016-04-11 19:17 +0300 http://bitbucket.org/pypy/pypy/changeset/8d781f7a74f7/ Log: remove outdated headers diff too long, truncating to 2000 out of 2378 lines diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -144,7 +144,7 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir, copy_numpy_headers): +def copy_header_files(dstdir): # XXX: 20 lines of code to recursively copy a directory, really?? assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') @@ -152,18 +152,6 @@ headers.append(udir.join(name)) _copy_header_files(headers, dstdir) - if copy_numpy_headers: - try: - dstdir.mkdir('numpy') - except py.error.EEXIST: - pass - numpy_dstdir = dstdir / 'numpy' - - numpy_include_dir = include_dir / 'numpy' - numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') - _copy_header_files(numpy_headers, numpy_dstdir) - - class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -1207,7 +1195,7 @@ setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include, use_micronumpy) + copy_header_files(trunk_include) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h deleted file mode 100644 --- a/pypy/module/cpyext/include/numpy/__multiarray_api.h +++ 
/dev/null @@ -1,10 +0,0 @@ - - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -#define import_array() -#define PyArray_New _PyArray_New - diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h deleted file mode 100644 --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ /dev/null @@ -1,233 +0,0 @@ - -/* NDArray object interface - S. H. Muller, 2013/07/26 - * It will be copied by numpy/core/setup.py by install_data to - * site-packages/numpy/core/includes/numpy -*/ - -#ifndef Py_NDARRAYOBJECT_H -#define Py_NDARRAYOBJECT_H -#ifdef __cplusplus -extern "C" { -#endif - -#include "old_defines.h" -#include "npy_common.h" -#include "__multiarray_api.h" - -#define NPY_UNUSED(x) x -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) - -/* fake PyArrayObject so that code that doesn't do direct field access works */ -#define PyArrayObject PyObject -#define PyArray_Descr PyObject - -PyAPI_DATA(PyTypeObject) PyArray_Type; - - -#define NPY_MAXDIMS 32 - -#ifndef NDARRAYTYPES_H -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -/* data types copied from numpy/ndarraytypes.h - * keep numbers in sync with micronumpy.interp_dtype.DTypeCache - */ -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - /* - * New 1.6 types appended, may be integrated - * into the above in 2.0. 
- */ - NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, - - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR, /* special flag */ - NPY_USERDEF=256, /* leave room for characters */ - - /* The number of types not including the new 1.6 types */ - NPY_NTYPES_ABI_COMPATIBLE=21 -}; - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) -#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) -#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) -#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) - - -/* flags */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 -#define NPY_ARRAY_OWNDATA 0x0004 -#define NPY_ARRAY_FORCECAST 0x0010 -#define NPY_ARRAY_ENSURECOPY 0x0020 -#define NPY_ARRAY_ENSUREARRAY 0x0040 -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 -#define NPY_ARRAY_ALIGNED 0x0100 -#define NPY_ARRAY_NOTSWAPPED 0x0200 -#define NPY_ARRAY_WRITEABLE 0x0400 -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY 
(NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -#define NPY_FARRAY NPY_ARRAY_FARRAY -#define NPY_CARRAY NPY_ARRAY_CARRAY - -#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) - -#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) - -#define PyArray_ISONESEGMENT(arr) (1) -#define PyArray_ISNOTSWAPPED(arr) (1) -#define PyArray_ISBYTESWAPPED(arr) (0) - -#endif - -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CFLOAT -#define NPY_COMPLEX64 NPY_CDOUBLE - - -/* functions */ -#ifndef PyArray_NDIM - -#define PyArray_Check _PyArray_Check -#define PyArray_CheckExact 
_PyArray_CheckExact -#define PyArray_FLAGS _PyArray_FLAGS - -#define PyArray_NDIM _PyArray_NDIM -#define PyArray_DIM _PyArray_DIM -#define PyArray_STRIDE _PyArray_STRIDE -#define PyArray_SIZE _PyArray_SIZE -#define PyArray_ITEMSIZE _PyArray_ITEMSIZE -#define PyArray_NBYTES _PyArray_NBYTES -#define PyArray_TYPE _PyArray_TYPE -#define PyArray_DATA _PyArray_DATA - -#define PyArray_Size PyArray_SIZE -#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) - -#define PyArray_FromAny _PyArray_FromAny -#define PyArray_FromObject _PyArray_FromObject -#define PyArray_ContiguousFromObject PyArray_FromObject -#define PyArray_ContiguousFromAny PyArray_FromObject - -#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) -#define PyArray_FROM_OTF(obj, typenum, requirements) \ - PyArray_FromObject(obj, typenum, 0, 0) - -#define PyArray_New _PyArray_New -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData -#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning - -#define PyArray_EMPTY(nd, dims, type_num, fortran) \ - PyArray_SimpleNew(nd, dims, type_num) - -PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); -PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); -PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); - -#define PyArray_FILLWBYTE _PyArray_FILLWBYTE -#define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto - -#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) - -/* Don't use these in loops! 
*/ - -#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0))) - -#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1))) - -#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ - (k)*PyArray_STRIDE(obj,2))) - -#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ - (k)*PyArray_STRIDE(obj,2) + \ - (l)*PyArray_STRIDE(obj,3))) - -#endif - -#ifdef __cplusplus -} -#endif -#endif /* !Py_NDARRAYOBJECT_H */ diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h deleted file mode 100644 --- a/pypy/module/cpyext/include/numpy/ndarraytypes.h +++ /dev/null @@ -1,1786 +0,0 @@ -#ifndef NDARRAYTYPES_H -#define NDARRAYTYPES_H - -#include "numpy/npy_common.h" -//#include "npy_endian.h" -//#include "npy_cpu.h" -//#include "utils.h" - -//for pypy - numpy has lots of typedefs -//for pypy - make life easier, less backward support -#define NPY_1_8_API_VERSION 0x00000008 -#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION -#undef NPY_1_8_API_VERSION - -#define NPY_ENABLE_SEPARATE_COMPILATION 1 -#define NPY_VISIBILITY_HIDDEN - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -#else - #define NPY_NO_EXPORT static -#endif - -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP - #define NPY_ALLOW_THREADS 1 -#else - #define NPY_ALLOW_THREADS 0 -#endif - - - -/* - * There are several places in the code where an array of dimensions - * is allocated statically. This is the size of that static - * allocation. 
- * - * The array creation itself could have arbitrary dimensions but all - * the places where static allocation is used would need to be changed - * to dynamic (including inside of several structures) - */ - -#define NPY_MAXDIMS 32 -#define NPY_MAXARGS 32 - -/* Used for Converter Functions "O&" code in ParseTuple */ -#define NPY_FAIL 0 -#define NPY_SUCCEED 1 - -/* - * Binary compatibility version number. This number is increased - * whenever the C-API is changed such that binary compatibility is - * broken, i.e. whenever a recompile of extension modules is needed. - */ -#define NPY_VERSION NPY_ABI_VERSION - -/* - * Minor API version. This number is increased whenever a change is - * made to the C-API -- whether it breaks binary compatibility or not. - * Some changes, such as adding a function pointer to the end of the - * function table, can be made without breaking binary compatibility. - * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) - * would be increased. Whenever binary compatibility is broken, both - * NPY_VERSION and NPY_FEATURE_VERSION should be increased. - */ -#define NPY_FEATURE_VERSION NPY_API_VERSION - -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - /* - * New 1.6 types appended, may be integrated - * into the above in 2.0. 
- */ - NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, - - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR, /* special flag */ - NPY_USERDEF=256, /* leave room for characters */ - - /* The number of types not including the new 1.6 types */ - NPY_NTYPES_ABI_COMPATIBLE=21 -}; - -/* basetype array priority */ -#define NPY_PRIORITY 0.0 - -/* default subtype priority */ -#define NPY_SUBTYPE_PRIORITY 1.0 - -/* default scalar priority */ -#define NPY_SCALAR_PRIORITY -1000000.0 - -/* How many floating point types are there (excluding half) */ -#define NPY_NUM_FLOATTYPE 3 - -/* - * These characters correspond to the array type and the struct - * module - */ - -enum NPY_TYPECHAR { - NPY_BOOLLTR = '?', - NPY_BYTELTR = 'b', - NPY_UBYTELTR = 'B', - NPY_SHORTLTR = 'h', - NPY_USHORTLTR = 'H', - NPY_INTLTR = 'i', - NPY_UINTLTR = 'I', - NPY_LONGLTR = 'l', - NPY_ULONGLTR = 'L', - NPY_LONGLONGLTR = 'q', - NPY_ULONGLONGLTR = 'Q', - NPY_HALFLTR = 'e', - NPY_FLOATLTR = 'f', - NPY_DOUBLELTR = 'd', - NPY_LONGDOUBLELTR = 'g', - NPY_CFLOATLTR = 'F', - NPY_CDOUBLELTR = 'D', - NPY_CLONGDOUBLELTR = 'G', - NPY_OBJECTLTR = 'O', - NPY_STRINGLTR = 'S', - NPY_STRINGLTR2 = 'a', - NPY_UNICODELTR = 'U', - NPY_VOIDLTR = 'V', - NPY_DATETIMELTR = 'M', - NPY_TIMEDELTALTR = 'm', - NPY_CHARLTR = 'c', - - /* - * No Descriptor, just a define -- this let's - * Python users specify an array of integers - * large enough to hold a pointer on the - * platform - */ - NPY_INTPLTR = 'p', - NPY_UINTPLTR = 'P', - - /* - * These are for dtype 'kinds', not dtype 'typecodes' - * as the above are for. 
- */ - NPY_GENBOOLLTR ='b', - NPY_SIGNEDLTR = 'i', - NPY_UNSIGNEDLTR = 'u', - NPY_FLOATINGLTR = 'f', - NPY_COMPLEXLTR = 'c' -}; - -typedef enum { - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2 -} NPY_SORTKIND; -#define NPY_NSORTS (NPY_MERGESORT + 1) - - -typedef enum { - NPY_INTROSELECT=0, -} NPY_SELECTKIND; -#define NPY_NSELECTS (NPY_INTROSELECT + 1) - - -typedef enum { - NPY_SEARCHLEFT=0, - NPY_SEARCHRIGHT=1 -} NPY_SEARCHSIDE; -#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) - - -typedef enum { - NPY_NOSCALAR=-1, - NPY_BOOL_SCALAR, - NPY_INTPOS_SCALAR, - NPY_INTNEG_SCALAR, - NPY_FLOAT_SCALAR, - NPY_COMPLEX_SCALAR, - NPY_OBJECT_SCALAR -} NPY_SCALARKIND; -#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) - -/* For specifying array memory layout or iteration order */ -typedef enum { - /* Fortran order if inputs are all Fortran, C otherwise */ - NPY_ANYORDER=-1, - /* C order */ - NPY_CORDER=0, - /* Fortran order */ - NPY_FORTRANORDER=1, - /* An order as close to the inputs as possible */ - NPY_KEEPORDER=2 -} NPY_ORDER; - -/* For specifying allowed casting in operations which support it */ -typedef enum { - /* Only allow identical types */ - NPY_NO_CASTING=0, - /* Allow identical and byte swapped types */ - NPY_EQUIV_CASTING=1, - /* Only allow safe casts */ - NPY_SAFE_CASTING=2, - /* Allow safe casts or casts within the same kind */ - NPY_SAME_KIND_CASTING=3, - /* Allow any casts */ - NPY_UNSAFE_CASTING=4, - - /* - * Temporary internal definition only, will be removed in upcoming - * release, see below - * */ - NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, -} NPY_CASTING; - -typedef enum { - NPY_CLIP=0, - NPY_WRAP=1, - NPY_RAISE=2 -} NPY_CLIPMODE; - -/* The special not-a-time (NaT) value */ -#define NPY_DATETIME_NAT NPY_MIN_INT64 - -/* - * Upper bound on the length of a DATETIME ISO 8601 string - * YEAR: 21 (64-bit year) - * MONTH: 3 - * DAY: 3 - * HOURS: 3 - * MINUTES: 3 - * SECONDS: 3 - * ATTOSECONDS: 1 + 3*6 - * TIMEZONE: 5 - * NULL TERMINATOR: 
1 - */ -#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) - -typedef enum { - NPY_FR_Y = 0, /* Years */ - NPY_FR_M = 1, /* Months */ - NPY_FR_W = 2, /* Weeks */ - /* Gap where 1.6 NPY_FR_B (value 3) was */ - NPY_FR_D = 4, /* Days */ - NPY_FR_h = 5, /* hours */ - NPY_FR_m = 6, /* minutes */ - NPY_FR_s = 7, /* seconds */ - NPY_FR_ms = 8, /* milliseconds */ - NPY_FR_us = 9, /* microseconds */ - NPY_FR_ns = 10,/* nanoseconds */ - NPY_FR_ps = 11,/* picoseconds */ - NPY_FR_fs = 12,/* femtoseconds */ - NPY_FR_as = 13,/* attoseconds */ - NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ -} NPY_DATETIMEUNIT; - -/* - * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS - * is technically one more than the actual number of units. - */ -#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) -#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC - -/* - * Business day conventions for mapping invalid business - * days to valid business days. - */ -typedef enum { - /* Go forward in time to the following business day. */ - NPY_BUSDAY_FORWARD, - NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, - /* Go backward in time to the preceding business day. */ - NPY_BUSDAY_BACKWARD, - NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, - /* - * Go forward in time to the following business day, unless it - * crosses a month boundary, in which case go backward - */ - NPY_BUSDAY_MODIFIEDFOLLOWING, - /* - * Go backward in time to the preceding business day, unless it - * crosses a month boundary, in which case go forward. - */ - NPY_BUSDAY_MODIFIEDPRECEDING, - /* Produce a NaT for non-business days. */ - NPY_BUSDAY_NAT, - /* Raise an exception for non-business days. */ - NPY_BUSDAY_RAISE -} NPY_BUSDAY_ROLL; - -/************************************************************ - * NumPy Auxiliary Data for inner loops, sort functions, etc. 
- ************************************************************/ - -/* - * When creating an auxiliary data struct, this should always appear - * as the first member, like this: - * - * typedef struct { - * NpyAuxData base; - * double constant; - * } constant_multiplier_aux_data; - */ -typedef struct NpyAuxData_tag NpyAuxData; - -/* Function pointers for freeing or cloning auxiliary data */ -typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); -typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); - -struct NpyAuxData_tag { - NpyAuxData_FreeFunc *free; - NpyAuxData_CloneFunc *clone; - /* To allow for a bit of expansion without breaking the ABI */ - void *reserved[2]; -}; - -/* Macros to use for freeing and cloning auxiliary data */ -#define NPY_AUXDATA_FREE(auxdata) \ - do { \ - if ((auxdata) != NULL) { \ - (auxdata)->free(auxdata); \ - } \ - } while(0) -#define NPY_AUXDATA_CLONE(auxdata) \ - ((auxdata)->clone(auxdata)) - -#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); -#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); - -#define NPY_STRINGIFY(x) #x -#define NPY_TOSTRING(x) NPY_STRINGIFY(x) - - /* - * Macros to define how array, and dimension/strides data is - * allocated. 
- */ - - /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ - -#define NPY_USE_PYMEM 1 - -#if NPY_USE_PYMEM == 1 -#define PyArray_malloc PyMem_Malloc -#define PyArray_free PyMem_Free -#define PyArray_realloc PyMem_Realloc -#else -#define PyArray_malloc malloc -#define PyArray_free free -#define PyArray_realloc realloc -#endif - -/* Dimensions and strides */ -#define PyDimMem_NEW(size) \ - ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) - -#define PyDimMem_FREE(ptr) PyArray_free(ptr) - -#define PyDimMem_RENEW(ptr,size) \ - ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) - -/* forward declaration */ -struct _PyArray_Descr; - -/* These must deal with unaligned and swapped data if necessary */ -typedef PyObject * (PyArray_GetItemFunc) (void *, void *); -typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); - -typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, - npy_intp, int, void *); - -typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); -typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); - - -/* - * These assume aligned and notswapped data -- a buffer will be used - * before or contiguous data will be obtained - */ - -typedef int (PyArray_CompareFunc)(const void *, const void *, void *); -typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); - -typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, - npy_intp, void *); - -typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, - void *); - -/* - * XXX the ignore argument should be removed next time the API version - * is bumped. It used to be the separator. 
- */ -typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, - char *ignore, struct _PyArray_Descr *); -typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, - struct _PyArray_Descr *); - -typedef int (PyArray_FillFunc)(void *, npy_intp, void *); - -typedef int (PyArray_SortFunc)(void *, npy_intp, void *); -typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); -typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); -typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); - -typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); - -typedef int (PyArray_ScalarKindFunc)(void *); - -typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, - void *max, void *out); -typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, - void *values, npy_intp nv); -typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, - npy_intp nindarray, npy_intp n_outer, - npy_intp m_middle, npy_intp nelem, - NPY_CLIPMODE clipmode); - -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -typedef struct { - /* - * Functions to cast to most other standard types - * Can have some NULL entries. The types - * DATETIME, TIMEDELTA, and HALF go into the castdict - * even though they are built-in. - */ - PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; - - /* The next four functions *cannot* be NULL */ - - /* - * Functions to get and set items with standard Python types - * -- not array scalars - */ - PyArray_GetItemFunc *getitem; - PyArray_SetItemFunc *setitem; - - /* - * Copy and/or swap data. 
Memory areas may not overlap - * Use memmove first if they might - */ - PyArray_CopySwapNFunc *copyswapn; - PyArray_CopySwapFunc *copyswap; - - /* - * Function to compare items - * Can be NULL - */ - PyArray_CompareFunc *compare; - - /* - * Function to select largest - * Can be NULL - */ - PyArray_ArgFunc *argmax; - - /* - * Function to compute dot product - * Can be NULL - */ - PyArray_DotFunc *dotfunc; - - /* - * Function to scan an ASCII file and - * place a single value plus possible separator - * Can be NULL - */ - PyArray_ScanFunc *scanfunc; - - /* - * Function to read a single value from a string - * and adjust the pointer; Can be NULL - */ - PyArray_FromStrFunc *fromstr; - - /* - * Function to determine if data is zero or not - * If NULL a default version is - * used at Registration time. - */ - PyArray_NonzeroFunc *nonzero; - - /* - * Used for arange. - * Can be NULL. - */ - PyArray_FillFunc *fill; - - /* - * Function to fill arrays with scalar values - * Can be NULL - */ - PyArray_FillWithScalarFunc *fillwithscalar; - - /* - * Sorting functions - * Can be NULL - */ - PyArray_SortFunc *sort[NPY_NSORTS]; - PyArray_ArgSortFunc *argsort[NPY_NSORTS]; - - /* - * Dictionary of additional casting functions - * PyArray_VectorUnaryFuncs - * which can be populated to support casting - * to other registered types. Can be NULL - */ - PyObject *castdict; - - /* - * Functions useful for generalizing - * the casting rules. - * Can be NULL; - */ - PyArray_ScalarKindFunc *scalarkind; - int **cancastscalarkindto; - int *cancastto; - - PyArray_FastClipFunc *fastclip; - PyArray_FastPutmaskFunc *fastputmask; - PyArray_FastTakeFunc *fasttake; - - /* - * Function to select smallest - * Can be NULL - */ - PyArray_ArgFunc *argmin; - -} PyArray_ArrFuncs; - -/* The item must be reference counted when it is inserted or extracted. 
*/ -#define NPY_ITEM_REFCOUNT 0x01 -/* Same as needing REFCOUNT */ -#define NPY_ITEM_HASOBJECT 0x01 -/* Convert to list for pickling */ -#define NPY_LIST_PICKLE 0x02 -/* The item is a POINTER */ -#define NPY_ITEM_IS_POINTER 0x04 -/* memory needs to be initialized for this data-type */ -#define NPY_NEEDS_INIT 0x08 -/* operations need Python C-API so don't give-up thread. */ -#define NPY_NEEDS_PYAPI 0x10 -/* Use f.getitem when extracting elements of this data-type */ -#define NPY_USE_GETITEM 0x20 -/* Use f.setitem when setting creating 0-d array from this data-type.*/ -#define NPY_USE_SETITEM 0x40 -/* A sticky flag specifically for structured arrays */ -#define NPY_ALIGNED_STRUCT 0x80 - -/* - *These are inherited for global data-type if any data-types in the - * field have them - */ -#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ - NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) - -#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ - NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ - NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) - -#define PyDataType_FLAGCHK(dtype, flag) \ - (((dtype)->flags & (flag)) == (flag)) - -#define PyDataType_REFCHK(dtype) \ - PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) - -typedef struct _PyArray_Descr { - PyObject_HEAD - /* - * the type object representing an - * instance of this type -- should not - * be two type_numbers with the same type - * object. - */ - PyTypeObject *typeobj; - /* kind for this type */ - char kind; - /* unique-character representing this type */ - char type; - /* - * '>' (big), '<' (little), '|' - * (not-applicable), or '=' (native). 
- */ - char byteorder; - /* flags describing data type */ - char flags; - /* number representing this type */ - int type_num; - /* element size (itemsize) for this type */ - int elsize; - /* alignment needed for this type */ - int alignment; - /* - * Non-NULL if this type is - * is an array (C-contiguous) - * of some other type - */ - struct _arr_descr *subarray; - /* - * The fields dictionary for this type - * For statically defined descr this - * is always Py_None - */ - PyObject *fields; - /* - * An ordered tuple of field names or NULL - * if no fields are defined - */ - PyObject *names; - /* - * a table of functions specific for each - * basic data descriptor - */ - PyArray_ArrFuncs *f; - /* Metadata about this dtype */ - PyObject *metadata; - /* - * Metadata specific to the C implementation - * of the particular dtype. This was added - * for NumPy 1.7.0. - */ - NpyAuxData *c_metadata; -} PyArray_Descr; - -typedef struct _arr_descr { - PyArray_Descr *base; - PyObject *shape; /* a tuple */ -} PyArray_ArrayDescr; - -/* - * The main array object structure. - * - * It has been recommended to use the inline functions defined below - * (PyArray_DATA and friends) to access fields here for a number of - * releases. Direct access to the members themselves is deprecated. - * To ensure that your code does not use deprecated access, - * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION - * (or NPY_1_8_API_VERSION or higher as required). - */ -/* This struct will be moved to a private header in a future release */ -typedef struct tagPyArrayObject_fields { - PyObject_HEAD - /* Pointer to the raw data buffer */ - char *data; - /* The number of dimensions, also called 'ndim' */ - int nd; - /* The size in each dimension, also called 'shape' */ - npy_intp *dimensions; - /* - * Number of bytes to jump to get to the - * next element in each dimension - */ - npy_intp *strides; - /* - * This object is decref'd upon - * deletion of array. 
Except in the - * case of UPDATEIFCOPY which has - * special handling. - * - * For views it points to the original - * array, collapsed so no chains of - * views occur. - * - * For creation from buffer object it - * points to an object that shold be - * decref'd on deletion - * - * For UPDATEIFCOPY flag this is an - * array to-be-updated upon deletion - * of this one - */ - PyObject *base; - /* Pointer to type structure */ - PyArray_Descr *descr; - /* Flags describing array -- see below */ - int flags; - /* For weak references */ - PyObject *weakreflist; -} PyArrayObject_fields; - -/* - * To hide the implementation details, we only expose - * the Python struct HEAD. - */ -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) -/* - * Can't put this in npy_deprecated_api.h like the others. - * PyArrayObject field access is deprecated as of NumPy 1.7. - */ -typedef PyArrayObject_fields PyArrayObject; -#else -typedef struct tagPyArrayObject { - PyObject_HEAD -} PyArrayObject; -#endif - -#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) - -/* Array Flags Object */ -typedef struct PyArrayFlagsObject { - PyObject_HEAD - PyObject *arr; - int flags; -} PyArrayFlagsObject; - -/* Mirrors buffer object to ptr */ - -typedef struct { - PyObject_HEAD - PyObject *base; - void *ptr; - npy_intp len; - int flags; -} PyArray_Chunk; - -typedef struct { - NPY_DATETIMEUNIT base; - int num; -} PyArray_DatetimeMetaData; - -typedef struct { - NpyAuxData base; - PyArray_DatetimeMetaData meta; -} PyArray_DatetimeDTypeMetaData; - -/* - * This structure contains an exploded view of a date-time value. - * NaT is represented by year == NPY_DATETIME_NAT. - */ -typedef struct { - npy_int64 year; - npy_int32 month, day, hour, min, sec, us, ps, as; -} npy_datetimestruct; - -/* This is not used internally. 
*/ -typedef struct { - npy_int64 day; - npy_int32 sec, us, ps, as; -} npy_timedeltastruct; - -typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); - -/* - * Means c-style contiguous (last index varies the fastest). The data - * elements right after each other. - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 - -/* - * Set if array is a contiguous Fortran array: the first index varies - * the fastest in memory (strides array is reverse of C-contiguous - * array) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 - -/* - * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a - * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with - * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS - * at the same time if they have either zero or one element. - * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional - * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements - * and the array is contiguous if ndarray.squeeze() is contiguous. - * I.e. dimensions for which `ndarray.shape[dimension] == 1` are - * ignored. - */ - -/* - * If set, the array owns the data: it will be free'd when the array - * is deleted. - * - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_OWNDATA 0x0004 - -/* - * An array never has the next four set; they're only used as parameter - * flags to the the various FromAny functions - * - * This flag may be requested in constructor functions. - */ - -/* Cause a cast to occur regardless of whether or not it is safe. */ -#define NPY_ARRAY_FORCECAST 0x0010 - -/* - * Always copy the array. Returned arrays are always CONTIGUOUS, - * ALIGNED, and WRITEABLE. - * - * This flag may be requested in constructor functions. 
- */ -#define NPY_ARRAY_ENSURECOPY 0x0020 - -/* - * Make sure the returned array is a base-class ndarray - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ENSUREARRAY 0x0040 - -/* - * Make sure that the strides are in units of the element size Needed - * for some operations with record-arrays. - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 - -/* - * Array data is aligned on the appropiate memory address for the type - * stored according to how the compiler would align things (e.g., an - * array of integers (4 bytes each) starts on a memory address that's - * a multiple of 4) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_ALIGNED 0x0100 - -/* - * Array data has the native endianness - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_NOTSWAPPED 0x0200 - -/* - * Array data is writeable - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_WRITEABLE 0x0400 - -/* - * If this flag is set, then base contains a pointer to an array of - * the same size that should be updated with the current contents of - * this array when this array is deallocated - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 - -/* - * NOTE: there are also internal flags defined in multiarray/arrayobject.h, - * which start at bit 31 and work down. 
- */ - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -/* This flag is for the array interface, not PyArrayObject */ -#define NPY_ARR_HAS_DESCR 0x0800 - - - - -/* - * Size of internal buffers used for alignment Make BUFSIZE a multiple - * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned - */ -#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) -#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) -#define NPY_BUFSIZE 8192 -/* buffer stress test size: */ -/*#define NPY_BUFSIZE 17*/ - -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) -#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ - ((p).real < (q).real))) -#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ - ((p).real > (q).real))) -#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ - ((p).real <= (q).real))) -#define PyArray_CGE(p,q) ((((p).real==(q).real) ? 
((p).imag >= (q).imag) : \ - ((p).real >= (q).real))) -#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) -#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) - -/* - * C API: consists of Macros and functions. The MACROS are defined - * here. - */ - - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) - -#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) - -#if NPY_ALLOW_THREADS -#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; -#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); -#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ - { _save = PyEval_SaveThread();} } while (0); - -#define NPY_BEGIN_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_BEGIN_THREADS;} while (0); - -#define NPY_END_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_END_THREADS; } while (0); - -#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; -#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); -#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); -#else -#define NPY_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF -#define NPY_BEGIN_THREADS -#define NPY_END_THREADS -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) -#define NPY_BEGIN_THREADS_DESCR(dtype) -#define NPY_END_THREADS_DESCR(dtype) -#define NPY_ALLOW_C_API_DEF -#define NPY_ALLOW_C_API -#define NPY_DISABLE_C_API -#endif - 
-/********************************** - * The nditer object, added in 1.6 - **********************************/ - -/* The actual structure of the iterator is an internal detail */ -typedef struct NpyIter_InternalOnly NpyIter; - -/* Iterator function pointers that may be specialized */ -typedef int (NpyIter_IterNextFunc)(NpyIter *iter); -typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, - npy_intp *outcoords); - -/*** Global flags that may be passed to the iterator constructors ***/ - -/* Track an index representing C order */ -#define NPY_ITER_C_INDEX 0x00000001 -/* Track an index representing Fortran order */ -#define NPY_ITER_F_INDEX 0x00000002 -/* Track a multi-index */ -#define NPY_ITER_MULTI_INDEX 0x00000004 -/* User code external to the iterator does the 1-dimensional innermost loop */ -#define NPY_ITER_EXTERNAL_LOOP 0x00000008 -/* Convert all the operands to a common data type */ -#define NPY_ITER_COMMON_DTYPE 0x00000010 -/* Operands may hold references, requiring API access during iteration */ -#define NPY_ITER_REFS_OK 0x00000020 -/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ -#define NPY_ITER_ZEROSIZE_OK 0x00000040 -/* Permits reductions (size-0 stride with dimension size > 1) */ -#define NPY_ITER_REDUCE_OK 0x00000080 -/* Enables sub-range iteration */ -#define NPY_ITER_RANGED 0x00000100 -/* Enables buffering */ -#define NPY_ITER_BUFFERED 0x00000200 -/* When buffering is enabled, grows the inner loop if possible */ -#define NPY_ITER_GROWINNER 0x00000400 -/* Delay allocation of buffers until first Reset* call */ -#define NPY_ITER_DELAY_BUFALLOC 0x00000800 -/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ -#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 - -/*** Per-operand flags that may be passed to the iterator constructors ***/ - -/* The operand will be read from and written to */ -#define NPY_ITER_READWRITE 0x00010000 -/* The operand will only be read from */ -#define NPY_ITER_READONLY 
0x00020000 -/* The operand will only be written to */ -#define NPY_ITER_WRITEONLY 0x00040000 -/* The operand's data must be in native byte order */ -#define NPY_ITER_NBO 0x00080000 -/* The operand's data must be aligned */ -#define NPY_ITER_ALIGNED 0x00100000 -/* The operand's data must be contiguous (within the inner loop) */ -#define NPY_ITER_CONTIG 0x00200000 -/* The operand may be copied to satisfy requirements */ -#define NPY_ITER_COPY 0x00400000 -/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ -#define NPY_ITER_UPDATEIFCOPY 0x00800000 -/* Allocate the operand if it is NULL */ -#define NPY_ITER_ALLOCATE 0x01000000 -/* If an operand is allocated, don't use any subtype */ -#define NPY_ITER_NO_SUBTYPE 0x02000000 -/* This is a virtual array slot, operand is NULL but temporary data is there */ -#define NPY_ITER_VIRTUAL 0x04000000 -/* Require that the dimension match the iterator dimensions exactly */ -#define NPY_ITER_NO_BROADCAST 0x08000000 -/* A mask is being used on this array, affects buffer -> array copy */ -#define NPY_ITER_WRITEMASKED 0x10000000 -/* This array is the mask for all WRITEMASKED operands */ -#define NPY_ITER_ARRAYMASK 0x20000000 - -#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff -#define NPY_ITER_PER_OP_FLAGS 0xffff0000 - - -/***************************** - * Basic iterator object - *****************************/ - -/* FWD declaration */ -typedef struct PyArrayIterObject_tag PyArrayIterObject; - -/* - * type of the function which translates a set of coordinates to a - * pointer to the data - */ -typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); - -struct PyArrayIterObject_tag { - PyObject_HEAD - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - 
npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; -} ; - - -/* Iterator API */ -#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) - -#define _PyAIT(it) ((PyArrayIterObject *)(it)) -#define PyArray_ITER_RESET(it) do { \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - memset(_PyAIT(it)->coordinates, 0, \ - (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ -} while (0) - -#define _PyArray_ITER_NEXT1(it) do { \ - (it)->dataptr += _PyAIT(it)->strides[0]; \ - (it)->coordinates[0]++; \ -} while (0) - -#define _PyArray_ITER_NEXT2(it) do { \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ -} while (0) - -#define _PyArray_ITER_NEXT3(it) do { \ - if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ - (it)->coordinates[2]++; \ - (it)->dataptr += (it)->strides[2]; \ - } \ - else { \ - (it)->coordinates[2] = 0; \ - (it)->dataptr -= (it)->backstrides[2]; \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] \ - (it)->backstrides[1]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_NEXT(it) do { \ - _PyAIT(it)->index++; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyArray_ITER_NEXT1(_PyAIT(it)); \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else if (_PyAIT(it)->nd_m1 == 1) { \ - _PyArray_ITER_NEXT2(_PyAIT(it)); \ - } \ - else { \ - int __npy_i; \ - for 
(__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ - if (_PyAIT(it)->coordinates[__npy_i] < \ - _PyAIT(it)->dims_m1[__npy_i]) { \ - _PyAIT(it)->coordinates[__npy_i]++; \ - _PyAIT(it)->dataptr += \ - _PyAIT(it)->strides[__npy_i]; \ - break; \ - } \ - else { \ - _PyAIT(it)->coordinates[__npy_i] = 0; \ - _PyAIT(it)->dataptr -= \ - _PyAIT(it)->backstrides[__npy_i]; \ - } \ - } \ - } \ -} while (0) - -#define PyArray_ITER_GOTO(it, destination) do { \ - int __npy_i; \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ - if (destination[__npy_i] < 0) { \ - destination[__npy_i] += \ - _PyAIT(it)->dims_m1[__npy_i]+1; \ - } \ - _PyAIT(it)->dataptr += destination[__npy_i] * \ - _PyAIT(it)->strides[__npy_i]; \ - _PyAIT(it)->coordinates[__npy_i] = \ - destination[__npy_i]; \ - _PyAIT(it)->index += destination[__npy_i] * \ - ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ - _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ - } \ -} while (0) - -#define PyArray_ITER_GOTO1D(it, ind) do { \ - int __npy_i; \ - npy_intp __npy_ind = (npy_intp) (ind); \ - if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ - _PyAIT(it)->index = __npy_ind; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * _PyAIT(it)->strides[0]; \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ - __npy_i++) { \ - _PyAIT(it)->dataptr += \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ - * _PyAIT(it)->strides[__npy_i]; \ - __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) - -#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) - - -/* - * Any object passed to 
PyArray_Broadcast must be binary compatible - * with this structure. - */ - -typedef struct { - PyObject_HEAD - int numiter; /* number of iters */ - npy_intp size; /* broadcasted size */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ -} PyArrayMultiIterObject; - -#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) -#define PyArray_MultiIter_RESET(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index = 0; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_NEXT(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index++; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_GOTO(multi, dest) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_DATA(multi, i) \ - ((void *)(_PyMIT(multi)->iters[i]->dataptr)) - -#define PyArray_MultiIter_NEXTi(multi, i) \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) - -#define PyArray_MultiIter_NOTDONE(multi) \ - (_PyMIT(multi)->index < _PyMIT(multi)->size) - -/* Store the information needed for fancy-indexing over an array */ - -typedef struct { - PyObject_HEAD - /* - * Multi-iterator portion --- needs to be present in this - * order to work with PyArray_Broadcast - */ - - int numiter; 
/* number of index-array - iterators */ - npy_intp size; /* size of broadcasted - result */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object - iterators */ - PyArrayIterObject *ait; /* flat Iterator for - underlying array */ - - /* flat iterator for subspace (when numiter < nd) */ - PyArrayIterObject *subspace; - - /* - * if subspace iteration, then this is the array of axes in - * the underlying array represented by the index objects - */ - int iteraxes[NPY_MAXDIMS]; - /* - * if subspace iteration, the these are the coordinates to the - * start of the subspace. - */ - npy_intp bscoord[NPY_MAXDIMS]; - - PyObject *indexobj; /* creating obj */ - /* - * consec is first used to indicate wether fancy indices are - * consecutive and then denotes at which axis they are inserted - */ - int consec; - char *dataptr; - -} PyArrayMapIterObject; - -enum { - NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, - NPY_NEIGHBORHOOD_ITER_ONE_PADDING, - NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, - NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, - NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING -}; - -typedef struct { - PyObject_HEAD - - /* - * PyArrayIterObject part: keep this in this exact order - */ - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; - - /* - * New members - */ - npy_intp nd; - - /* Dimensions is the dimension of the array */ - npy_intp 
dimensions[NPY_MAXDIMS]; - - /* - * Neighborhood points coordinates are computed relatively to the - * point pointed by _internal_iter - */ - PyArrayIterObject* _internal_iter; - /* - * To keep a reference to the representation of the constant value - * for constant padding - */ - char* constant; - - int mode; -} PyArrayNeighborhoodIterObject; - -/* - * Neighborhood iterator API - */ - -/* General: those work for any mode */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); -static NPY_INLINE int -PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); -#if 0 -static NPY_INLINE int -PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); -#endif - -/* - * Include inline implementations - functions defined there are not - * considered public API - */ -#define _NPY_INCLUDE_NEIGHBORHOOD_IMP -//#include "_neighborhood_iterator_imp.h" -#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP - -/* The default array type */ -#define NPY_DEFAULT_TYPE NPY_DOUBLE - -/* - * All sorts of useful ways to look into a PyArrayObject. It is recommended - * to use PyArrayObject * objects instead of always casting from PyObject *, - * for improved type checking. - * - * In many cases here the macro versions of the accessors are deprecated, - * but can't be immediately changed to inline functions because the - * preexisting macros accept PyObject * and do automatic casts. Inline - * functions accepting PyArrayObject * provides for some compile-time - * checking of correctness when working with these objects in C. - */ - -#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ - PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ - PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) - -#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ - (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) - -#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\ - NPY_ARRAY_F_CONTIGUOUS : 0)) - -#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) -/* - * Changing access macros into functions, to allow for future hiding - * of the internal memory layout. This later hiding will allow the 2.x series - * to change the internal representation of arrays without affecting - * ABI compatibility. - */ - -static NPY_INLINE int -PyArray_NDIM(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->nd; -} - -static NPY_INLINE void * -PyArray_DATA(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->data; -} - -static NPY_INLINE char * -PyArray_BYTES(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->data; -} - -static NPY_INLINE npy_intp * -PyArray_DIMS(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->dimensions; -} - -static NPY_INLINE npy_intp * -PyArray_STRIDES(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->strides; -} - -static NPY_INLINE npy_intp -PyArray_DIM(const PyArrayObject *arr, int idim) -{ - return ((PyArrayObject_fields *)arr)->dimensions[idim]; -} - -static NPY_INLINE npy_intp -PyArray_STRIDE(const PyArrayObject *arr, int istride) -{ - return ((PyArrayObject_fields *)arr)->strides[istride]; -} - -static NPY_INLINE PyObject * -PyArray_BASE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->base; -} - -static NPY_INLINE PyArray_Descr * -PyArray_DESCR(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr; -} - -static NPY_INLINE int -PyArray_FLAGS(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->flags; -} - -static NPY_INLINE npy_intp -PyArray_ITEMSIZE(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr->elsize; -} - -static NPY_INLINE int -PyArray_TYPE(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr->type_num; -} - -static NPY_INLINE int -PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) -{ - return (PyArray_FLAGS(arr) & 
flags) == flags; -} - -static NPY_INLINE PyObject * -PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) -{ - return ((PyArrayObject_fields *)arr)->descr->f->getitem( - (void *)itemptr, (PyArrayObject *)arr); -} - -static NPY_INLINE int -PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) -{ - return ((PyArrayObject_fields *)arr)->descr->f->setitem( - v, itemptr, arr); -} - -#else - -/* These macros are deprecated as of NumPy 1.7. */ -#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) -#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) -#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) -#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) -#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) -#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) -#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) -#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) -#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) -#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) -#define PyArray_CHKFLAGS(m, FLAGS) \ - ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) -#define PyArray_ITEMSIZE(obj) \ - (((PyArrayObject_fields *)(obj))->descr->elsize) -#define PyArray_TYPE(obj) \ - (((PyArrayObject_fields *)(obj))->descr->type_num) -#define PyArray_GETITEM(obj,itemptr) \ - PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ - (PyArrayObject *)(obj)) - -#define PyArray_SETITEM(obj,itemptr,v) \ - PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ - (char *)(itemptr), \ - (PyArrayObject *)(obj)) -#endif - -static NPY_INLINE PyArray_Descr * -PyArray_DTYPE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr; -} - -static NPY_INLINE npy_intp * -PyArray_SHAPE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->dimensions; -} - -/* - * Enables the specified array flags. 
Does no checking, - * assumes you know what you're doing. - */ -static NPY_INLINE void -PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) -{ - ((PyArrayObject_fields *)arr)->flags |= flags; -} - -/* - * Clears the specified array flags. Does no checking, - * assumes you know what you're doing. - */ -static NPY_INLINE void -PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) -{ - ((PyArrayObject_fields *)arr)->flags &= ~flags; -} - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) - -#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ - ((type) == NPY_USHORT) || \ - ((type) == NPY_UINT) || \ - ((type) == NPY_ULONG) || \ - ((type) == NPY_ULONGLONG)) - -#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ - ((type) == NPY_SHORT) || \ - ((type) == NPY_INT) || \ - ((type) == NPY_LONG) || \ - ((type) == NPY_LONGLONG)) - -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) - -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) - -#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ - ((type) == NPY_HALF)) - -#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ - ((type) == NPY_UNICODE)) - -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ - ((type) == NPY_DOUBLE) || \ - ((type) == NPY_CDOUBLE) || \ - ((type) == NPY_BOOL) || \ - ((type) == NPY_OBJECT )) - -#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ - ((type) <=NPY_VOID)) - -#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ - ((type) <=NPY_TIMEDELTA)) - -#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ - ((type) < NPY_USERDEF+ \ - NPY_NUMUSERTYPES)) - -#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ - PyTypeNum_ISUSERDEF(type)) - -#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) - - -#define 
PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj)) -#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) -#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) -#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) - -#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) -#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) -#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) -#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) -#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) -#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) -#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) -#define 
PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) -#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) -#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) -#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) -#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) -#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) - - /* - * FIXME: This should check for a flag on the data-type that - * states whether or not it is variable length. Because the - * ISFLEXIBLE check is hard-coded to the built-in data-types. - */ -#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) - -#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) - - -#define NPY_LITTLE '<' -#define NPY_BIG '>' -#define NPY_NATIVE '=' -#define NPY_SWAP 's' -#define NPY_IGNORE '|' - -#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN -#define NPY_NATBYTE NPY_BIG -#define NPY_OPPBYTE NPY_LITTLE -#else -#define NPY_NATBYTE NPY_LITTLE -#define NPY_OPPBYTE NPY_BIG -#endif - -#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) -#define PyArray_IsNativeByteOrder PyArray_ISNBO -#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) -#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) - - -#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) -#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) 
- -/************************************************************ - * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. - ************************************************************/ From pypy.commits at gmail.com Mon Apr 11 17:56:34 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 11 Apr 2016 14:56:34 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-includes: use constants from micronumpy rather than redefining them Message-ID: <570c1d92.41d91c0a.7a2b6.ffff900d@mx.google.com> Author: mattip Branch: cleanup-includes Changeset: r83603:ece0ba3db4e6 Date: 2016-04-11 19:28 +0300 http://bitbucket.org/pypy/pypy/changeset/ece0ba3db4e6/ Log: use constants from micronumpy rather than redefining them diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -11,44 +11,21 @@ from pypy.module.micronumpy.ctors import array from pypy.module.micronumpy.descriptor import get_dtype_cache, W_Dtype from pypy.module.micronumpy.concrete import ConcreteArray +from pypy.module.micronumpy.constants import (ARRAY_C_CONTIGUOUS, + ARRAY_F_CONTIGUOUS, ARRAY_OWNDATA, ARRAY_ALIGNED, ARRAY_WRITEABLE, + ARRAY_NOTSWAPPED, CORDER, FORTRANORDER) from pypy.module.micronumpy import ufuncs -import pypy.module.micronumpy.constants as NPY from rpython.rlib.rawstorage import RAW_STORAGE_PTR from pypy.interpreter.typedef import TypeDef from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.argument import Arguments from pypy.interpreter.gateway import interp2app -NPY_C_CONTIGUOUS = 0x0001 -NPY_F_CONTIGUOUS = 0x0002 -NPY_OWNDATA = 0x0004 -NPY_FORCECAST = 0x0010 -NPY_ENSURECOPY = 0x0020 -NPY_ENSUREARRAY = 0x0040 -NPY_ELEMENTSTRIDES = 0x0080 -NPY_ALIGNED = 0x0100 -NPY_NOTSWAPPED = 0x0200 -NPY_WRITEABLE = 0x0400 -NPY_UPDATEIFCOPY = 0x1000 +ARRAY_BEHAVED = ARRAY_ALIGNED | ARRAY_WRITEABLE +ARRAY_BEHAVED_NS = ARRAY_ALIGNED | ARRAY_WRITEABLE | ARRAY_NOTSWAPPED 
+ARRAY_CARRAY = ARRAY_C_CONTIGUOUS | ARRAY_BEHAVED +ARRAY_DEFAULT = ARRAY_CARRAY -NPY_BEHAVED = NPY_ALIGNED | NPY_WRITEABLE -NPY_BEHAVED_NS = NPY_ALIGNED | NPY_WRITEABLE | NPY_NOTSWAPPED -NPY_CARRAY = NPY_C_CONTIGUOUS | NPY_BEHAVED -NPY_CARRAY_RO = NPY_C_CONTIGUOUS | NPY_ALIGNED -NPY_FARRAY = NPY_F_CONTIGUOUS | NPY_BEHAVED -NPY_FARRAY_RO = NPY_F_CONTIGUOUS | NPY_ALIGNED -NPY_DEFAULT = NPY_CARRAY -NPY_IN = NPY_CARRAY_RO -NPY_OUT = NPY_CARRAY -NPY_INOUT = NPY_CARRAY | NPY_UPDATEIFCOPY -NPY_IN_FARRAY = NPY_FARRAY_RO -NPY_OUT_FARRAY = NPY_FARRAY -NPY_INOUT_FARRAY = NPY_FARRAY | NPY_UPDATEIFCOPY -NPY_CONTIGUOUS = NPY_C_CONTIGUOUS | NPY_F_CONTIGUOUS -NPY_UPDATE_ALL = NPY_CONTIGUOUS | NPY_ALIGNED - - -# the asserts are needed, otherwise the translation fails @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_Check(space, w_obj): @@ -66,7 +43,7 @@ @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_FLAGS(space, w_array): assert isinstance(w_array, W_NDimArray) - flags = NPY_BEHAVED_NS | w_array.get_flags() + flags = ARRAY_BEHAVED_NS | w_array.get_flags() return flags @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) @@ -141,13 +118,13 @@ may be 0. Also, if op is not already an array (or does not expose the array interface), then a new array will be created (and filled from op using the sequence protocol). The new array will have - NPY_DEFAULT as its flags member. + ARRAY_DEFAULT as its flags member. The context argument is passed to the __array__ method of op and is only used if the array is constructed that way. Almost always this parameter is NULL. 
""" - if requirements not in (0, NPY_DEFAULT): + if requirements not in (0, ARRAY_DEFAULT): raise OperationError(space.w_NotImplementedError, space.wrap( '_PyArray_FromAny called with not-implemented requirements argument')) w_array = array(space, w_obj, w_dtype=w_dtype, copy=False) @@ -204,12 +181,12 @@ return shape, dtype def simple_new(space, nd, dims, typenum, - order=NPY.CORDER, owning=False, w_subtype=None): + order=CORDER, owning=False, w_subtype=None): shape, dtype = get_shape_and_dtype(space, nd, dims, typenum) return W_NDimArray.from_shape(space, shape, dtype) def simple_new_from_data(space, nd, dims, typenum, data, - order=NPY.CORDER, owning=False, w_subtype=None): + order=CORDER, owning=False, w_subtype=None): shape, dtype = get_shape_and_dtype(space, nd, dims, typenum) storage = rffi.cast(RAW_STORAGE_PTR, data) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, @@ -228,7 +205,7 @@ def _PyArray_SimpleNewFromDataOwning(space, nd, dims, typenum, data): # Variant to take over ownership of the memory, equivalent to: # PyObject *arr = PyArray_SimpleNewFromData(nd, dims, typenum, data); - # ((PyArrayObject*)arr)->flags |= NPY_OWNDATA; + # ((PyArrayObject*)arr)->flags |= ARRAY_OWNDATA; return simple_new_from_data(space, nd, dims, typenum, data, owning=True) @@ -239,8 +216,8 @@ raise OperationError(space.w_NotImplementedError, space.wrap("strides must be NULL")) - order = NPY.CORDER if flags & NPY_C_CONTIGUOUS else NPY.FORTRANORDER - owning = True if flags & NPY_OWNDATA else False + order = CORDER if flags & ARRAY_C_CONTIGUOUS else FORTRANORDER + owning = True if flags & ARRAY_OWNDATA else False w_subtype = None if data: From pypy.commits at gmail.com Mon Apr 11 17:56:29 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 11 Apr 2016 14:56:29 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-includes: refactor creation of macro definitions and remove numpy includes Message-ID: <570c1d8d.865a1c0a.ab9e7.ffff95af@mx.google.com> Author: 
mattip Branch: cleanup-includes Changeset: r83601:b1ddf5e91458 Date: 2016-04-11 22:39 +0300 http://bitbucket.org/pypy/pypy/changeset/b1ddf5e91458/ Log: refactor creation of macro definitions and remove numpy includes From pypy.commits at gmail.com Mon Apr 11 17:56:35 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 11 Apr 2016 14:56:35 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-includes: add parts of headers removed in 4518b83c9ee2, fixes for testing Message-ID: <570c1d93.2179c20a.cd842.3e14@mx.google.com> Author: mattip Branch: cleanup-includes Changeset: r83604:a03329def3ec Date: 2016-04-11 21:47 +0300 http://bitbucket.org/pypy/pypy/changeset/a03329def3ec/ Log: add parts of headers removed in 4518b83c9ee2, fixes for testing diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -0,0 +1,38 @@ + +/* NDArray object interface - S. H. 
Muller, 2013/07/26 */ +/* For testing ndarrayobject only */ + +#ifndef Py_NDARRAYOBJECT_H +#define Py_NDARRAYOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#include "npy_common.h" +#include "ndarraytypes.h" + +/* fake PyArrayObject so that code that doesn't do direct field access works */ +#define PyArrayObject PyObject +#define PyArray_Descr PyObject + +PyAPI_DATA(PyTypeObject) PyArray_Type; + +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_CopyInto _PyArray_CopyInto +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE + +#define NPY_MAXDIMS 32 + +/* functions defined in ndarrayobject.c*/ + +PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); +PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); +PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); + + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_NDARRAYOBJECT_H */ diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -0,0 +1,123 @@ +#ifndef NDARRAYTYPES_H +#define NDARRAYTYPES_H + +/* For testing ndarrayobject only */ + +#include "numpy/npy_common.h" + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. 
+ */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; + +/* + * These characters correspond to the array type and the struct + * module + */ + +enum NPY_TYPECHAR { + NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_HALFLTR = 'e', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* + * No Descriptor, just a define -- this let's + * Python users specify an array of integers + * large enough to hold a pointer on the + * platform + */ + NPY_INTPLTR = 'p', + NPY_UINTPLTR = 'P', + + /* + * These are for dtype 'kinds', not dtype 'typecodes' + * as the above are for. + */ + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c' +}; + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; + +/* For specifying array memory layout or iteration order */ +typedef enum { + /* Fortran order if inputs are all Fortran, C otherwise */ + NPY_ANYORDER=-1, + /* C order */ + NPY_CORDER=0, + /* Fortran order */ + NPY_FORTRANORDER=1, + /* An order as close to the inputs as possible */ + NPY_KEEPORDER=2 +} NPY_ORDER; + + +/* + * C API: consists of Macros and functions. The MACROS are defined + * here. 
+ */ + + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +#endif /* NPY_ARRAYTYPES_H */ diff --git a/pypy/module/cpyext/include/numpy/npy_common.h b/pypy/module/cpyext/include/numpy/npy_common.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/npy_common.h @@ -0,0 +1,36 @@ +#ifndef _NPY_COMMON_H_ +#define _NPY_COMMON_H_ + +/* For testing ndarrayobject only */ + +typedef Py_intptr_t npy_intp; +typedef Py_uintptr_t npy_uintp; +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; +typedef unsigned char npy_bool; +typedef long npy_int32; +typedef unsigned long npy_uint32; +typedef unsigned long npy_ucs4; +typedef long npy_int64; +typedef unsigned long npy_uint64; +typedef unsigned char npy_uint8; + +typedef signed char npy_byte; +typedef unsigned char npy_ubyte; +typedef unsigned short npy_ushort; +typedef unsigned int npy_uint; +typedef unsigned long npy_ulong; + +/* These are for completeness */ +typedef char npy_char; +typedef short npy_short; +typedef int npy_int; +typedef long npy_long; +typedef float npy_float; +typedef double npy_double; + +typedef struct { float real, imag; } npy_cfloat; +typedef struct { double real, imag; } npy_cdouble; +typedef npy_cdouble npy_complex128; +#endif //_NPY_COMMON_H_ + diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -5,21 +5,21 @@ void _PyArray_FILLWBYTE(PyObject* obj, int val) { - memset(PyArray_DATA(obj), val, PyArray_NBYTES(obj)); + memset(_PyArray_DATA(obj), val, _PyArray_NBYTES(obj)); } PyObject* 
_PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran) { - PyObject *arr = PyArray_EMPTY(nd, dims, type_num, fortran); - memset(PyArray_DATA(arr), 0, PyArray_NBYTES(arr)); + PyObject *arr = _PyArray_SimpleNew(nd, dims, type_num); + memset(_PyArray_DATA(arr), 0, _PyArray_NBYTES(arr)); return arr; } int _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src) { - memcpy(PyArray_DATA(dest), PyArray_DATA(src), PyArray_NBYTES(dest)); + memcpy(_PyArray_DATA(dest), _PyArray_DATA(src), _PyArray_NBYTES(dest)); return 0; } diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -102,7 +102,7 @@ def test_copy_header_files(tmpdir): - api.copy_header_files(tmpdir, True) + api.copy_header_files(tmpdir) def check(name): f = tmpdir.join(name) assert f.check(file=True) From pypy.commits at gmail.com Mon Apr 11 17:56:42 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 11 Apr 2016 14:56:42 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-includes: copy all generated headers Message-ID: <570c1d9a.d5da1c0a.d3c4.6385@mx.google.com> Author: mattip Branch: cleanup-includes Changeset: r83608:4b1be61fd11d Date: 2016-04-11 23:55 +0300 http://bitbucket.org/pypy/pypy/changeset/4b1be61fd11d/ Log: copy all generated headers diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -148,7 +148,7 @@ # XXX: 20 lines of code to recursively copy a directory, really?? 
assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') - for name in ("pypy_decl.h", "pypy_macros.h", "pypy_structmember_decl.h"): + for name in ["pypy_macros.h"] + FUNCTIONS_BY_HEADER.keys(): headers.append(udir.join(name)) _copy_header_files(headers, dstdir) From pypy.commits at gmail.com Mon Apr 11 17:56:37 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 11 Apr 2016 14:56:37 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-includes: if a header is specified, put the declarations and macros in that header Message-ID: <570c1d95.c9b0c20a.15d05.ffff84ba@mx.google.com> Author: mattip Branch: cleanup-includes Changeset: r83605:cf9b74da2caa Date: 2016-04-11 22:41 +0300 http://bitbucket.org/pypy/pypy/changeset/cf9b74da2caa/ Log: if a header is specified, put the declarations and macros in that header diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -219,7 +219,8 @@ wrapper.c_name = cpyext_namespace.uniquename(self.c_name) return wrapper -def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header='pypy_decl.h', +DEFAULT_HEADER = 'pypy_decl.h' +def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER, gil=None, result_borrowed=False): """ Declares a function to be exported. 
@@ -253,6 +254,8 @@ func_name = func.func_name if header is not None: c_name = None + assert func_name not in FUNCTIONS, ( + "%s already registered" % func_name) else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, @@ -260,10 +263,6 @@ result_borrowed=result_borrowed) func.api_func = api_function - if header is not None: - assert func_name not in FUNCTIONS, ( - "%s already registered" % func_name) - if error is _NOT_SPECIFIED: raise ValueError("function %s has no return value for exceptions" % func) @@ -351,7 +350,8 @@ unwrapper_catch = make_unwrapper(True) unwrapper_raise = make_unwrapper(False) if header is not None: - FUNCTIONS[func_name] = api_function + if header == DEFAULT_HEADER: + FUNCTIONS[func_name] = api_function FUNCTIONS_BY_HEADER.setdefault(header, {})[func_name] = api_function INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests return unwrapper_raise # used in 'normal' RPython code. @@ -780,10 +780,11 @@ # Structure declaration code members = [] structindex = {} - for name, func in sorted(FUNCTIONS.iteritems()): - restype, args = c_function_signature(db, func) - members.append('%s (*%s)(%s);' % (restype, name, args)) - structindex[name] = len(structindex) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + restype, args = c_function_signature(db, func) + members.append('%s (*%s)(%s);' % (restype, name, args)) + structindex[name] = len(structindex) structmembers = '\n'.join(members) struct_declaration_code = """\ struct PyPyAPI { @@ -792,7 +793,8 @@ RPY_EXTERN struct PyPyAPI* pypyAPI = &_pypyAPI; """ % dict(members=structmembers) - functions = generate_decls_and_callbacks(db, export_symbols) + functions = generate_decls_and_callbacks(db, export_symbols, + prefix='cpyexttest') global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): @@ -884,13 +886,19 @@ pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') # implement 
structure initialization code - for name, func in FUNCTIONS.iteritems(): - if name.startswith('cpyext_'): # XXX hack - continue - pypyAPI[structindex[name]] = ctypes.cast( - ll2ctypes.lltype2ctypes(func.get_llhelper(space)), - ctypes.c_void_p) - + #for name, func in FUNCTIONS.iteritems(): + # if name.startswith('cpyext_'): # XXX hack + # continue + # pypyAPI[structindex[name]] = ctypes.cast( + # ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + # ctypes.c_void_p) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + if name.startswith('cpyext_'): # XXX hack + continue + pypyAPI[structindex[name]] = ctypes.cast( + ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + ctypes.c_void_p) setup_va_functions(eci) setup_init_functions(eci, translating=False) @@ -983,7 +991,7 @@ pypy_macros_h = udir.join('pypy_macros.h') pypy_macros_h.write('\n'.join(pypy_macros)) -def generate_decls_and_callbacks(db, export_symbols, api_struct=True): +def generate_decls_and_callbacks(db, export_symbols, api_struct=True, prefix=''): "NOT_RPYTHON" # implement function callbacks and generate function decls functions = [] @@ -1008,15 +1016,22 @@ header = decls[header_name] for name, func in sorted(header_functions.iteritems()): + if header == DEFAULT_HEADER: + _name = name + else: + # this name is not included in pypy_macros.h + _name = mangle_name(prefix, name) + assert _name is not None, 'error converting %s' % name + header.append("#define %s %s" % (name, _name)) restype, args = c_function_signature(db, func) - header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, name, args)) + header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, _name, args)) if api_struct: callargs = ', '.join('arg%d' % (i,) for i in range(len(func.argtypes))) if func.restype is lltype.Void: - body = "{ _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ _pypyAPI.%s(%s); }" % (_name, callargs) else: - body = "{ return _pypyAPI.%s(%s); }" % (name, callargs) + 
body = "{ return _pypyAPI.%s(%s); }" % (_name, callargs) functions.append('%s %s(%s)\n%s' % (restype, name, args, body)) for name in VA_TP_LIST: name_no_star = process_va_name(name) @@ -1146,7 +1161,8 @@ generate_macros(export_symbols, prefix='PyPy') - functions = generate_decls_and_callbacks(db, [], api_struct=False) + functions = generate_decls_and_callbacks(db, [], api_struct=False, + prefix='PyPy') code = "#include \n" + "\n".join(functions) eci = build_eci(False, export_symbols, code) From pypy.commits at gmail.com Mon Apr 11 17:56:39 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 11 Apr 2016 14:56:39 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-includes: move ndarray declarations to seperate header, adjust header creation Message-ID: <570c1d97.ca941c0a.47ac7.095e@mx.google.com> Author: mattip Branch: cleanup-includes Changeset: r83606:45cb9a7c7b7d Date: 2016-04-11 23:19 +0300 http://bitbucket.org/pypy/pypy/changeset/45cb9a7c7b7d/ Log: move ndarray declarations to seperate header, adjust header creation diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -811,6 +811,11 @@ prologue = ("#include \n" "#include \n" "#include \n") + if use_micronumpy: + prologue = ("#include \n" + "#include \n" + "#include \n" + "#include \n") code = (prologue + struct_declaration_code + global_code + @@ -1012,6 +1017,16 @@ for header_name, header_functions in FUNCTIONS_BY_HEADER.iteritems(): if header_name not in decls: header = decls[header_name] = [] + header.append("#ifndef _PYPY_%s\n" % + header_name.upper().replace('.','_')) + header.append("#define _PYPY_%s\n" % + header_name.upper().replace('.','_')) + header.append("#ifndef PYPY_STANDALONE\n") + header.append("#ifdef __cplusplus") + header.append("extern \"C\" {") + header.append("#endif\n") + header.append('#define Signed long /* xxx temporary fix */\n') + header.append('#define Unsigned unsigned long /* xxx temporary fix */\n') 
else: header = decls[header_name] @@ -1048,13 +1063,16 @@ typ = 'PyObject*' pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) - pypy_decls.append('#undef Signed /* xxx temporary fix */\n') - pypy_decls.append('#undef Unsigned /* xxx temporary fix */\n') - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("}") - pypy_decls.append("#endif") - pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") - pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") + for header_name in FUNCTIONS_BY_HEADER.keys(): + header = decls[header_name] + header.append('#undef Signed /* xxx temporary fix */\n') + header.append('#undef Unsigned /* xxx temporary fix */\n') + header.append("#ifdef __cplusplus") + header.append("}") + header.append("#endif") + header.append("#endif /*PYPY_STANDALONE*/\n") + header.append("#endif /*_PYPY_%s_H*/\n" % + header_name.upper().replace('.','_')) for header_name, header_decls in decls.iteritems(): decl_h = udir.join(header_name) @@ -1204,10 +1222,12 @@ PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **', getter_only=True, declare_as_extern=False) - for name, func in FUNCTIONS.iteritems(): - newname = mangle_name('PyPy', name) or name - deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) - deco(func.get_wrapper(space)) + for name, func in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, + relax=True) + deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -27,62 +27,63 @@ ARRAY_DEFAULT = ARRAY_CARRAY - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +HEADER = 'pypy_numpy.h' + at cpython_api([PyObject], rffi.INT_real, 
error=CANNOT_FAIL, header=HEADER) def _PyArray_Check(space, w_obj): w_obj_type = space.type(w_obj) w_type = space.gettypeobject(W_NDimArray.typedef) return (space.is_w(w_obj_type, w_type) or space.is_true(space.issubtype(w_obj_type, w_type))) - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL, header=HEADER) def _PyArray_CheckExact(space, w_obj): w_obj_type = space.type(w_obj) w_type = space.gettypeobject(W_NDimArray.typedef) return space.is_w(w_obj_type, w_type) - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL, header=HEADER) def _PyArray_FLAGS(space, w_array): assert isinstance(w_array, W_NDimArray) flags = ARRAY_BEHAVED_NS | w_array.get_flags() return flags - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL, header=HEADER) def _PyArray_NDIM(space, w_array): assert isinstance(w_array, W_NDimArray) return len(w_array.get_shape()) - at cpython_api([PyObject, Py_ssize_t], Py_ssize_t, error=CANNOT_FAIL) + at cpython_api([PyObject, Py_ssize_t], Py_ssize_t, error=CANNOT_FAIL, header=HEADER) def _PyArray_DIM(space, w_array, n): assert isinstance(w_array, W_NDimArray) return w_array.get_shape()[n] - at cpython_api([PyObject, Py_ssize_t], Py_ssize_t, error=CANNOT_FAIL) + at cpython_api([PyObject, Py_ssize_t], Py_ssize_t, error=CANNOT_FAIL, header=HEADER) def _PyArray_STRIDE(space, w_array, n): assert isinstance(w_array, W_NDimArray) return w_array.implementation.get_strides()[n] - at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) + at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL, header=HEADER) def _PyArray_SIZE(space, w_array): assert isinstance(w_array, W_NDimArray) return w_array.get_size() - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL, header=HEADER) def 
_PyArray_ITEMSIZE(space, w_array): assert isinstance(w_array, W_NDimArray) return w_array.get_dtype().elsize - at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) + at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL, header=HEADER) def _PyArray_NBYTES(space, w_array): assert isinstance(w_array, W_NDimArray) return w_array.get_size() * w_array.get_dtype().elsize - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL, header=HEADER) def _PyArray_TYPE(space, w_array): assert isinstance(w_array, W_NDimArray) return w_array.get_dtype().num - at cpython_api([PyObject], rffi.VOIDP, error=CANNOT_FAIL) + at cpython_api([PyObject], rffi.VOIDP, error=CANNOT_FAIL, header=HEADER) def _PyArray_DATA(space, w_array): # fails on scalars - see PyArray_FromAny() assert isinstance(w_array, W_NDimArray) @@ -92,7 +93,7 @@ NULL = lltype.nullptr(rffi.VOIDP.TO) @cpython_api([PyObject, PyArray_Descr, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.VOIDP], - PyObject) + PyObject, header=HEADER) def _PyArray_FromAny(space, w_obj, w_dtype, min_depth, max_depth, requirements, context): """ This is the main function used to obtain an array from any nested sequence, or object that exposes the array interface, op. 
The @@ -146,7 +147,7 @@ w_array.implementation.shape = [] return w_array - at cpython_api([Py_ssize_t], PyObject) + at cpython_api([Py_ssize_t], PyObject, header=HEADER) def _PyArray_DescrFromType(space, typenum): try: dtype = get_dtype_cache(space).dtypes_by_num[typenum] @@ -155,7 +156,7 @@ raise OperationError(space.w_ValueError, space.wrap( '_PyArray_DescrFromType called with invalid dtype %d' % typenum)) - at cpython_api([PyObject, Py_ssize_t, Py_ssize_t, Py_ssize_t], PyObject) + at cpython_api([PyObject, Py_ssize_t, Py_ssize_t, Py_ssize_t], PyObject, header=HEADER) def _PyArray_FromObject(space, w_obj, typenum, min_depth, max_depth): try: dtype = get_dtype_cache(space).dtypes_by_num[typenum] @@ -193,15 +194,15 @@ order=order, owning=owning, w_subtype=w_subtype) - at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t], PyObject) + at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t], PyObject, header=HEADER) def _PyArray_SimpleNew(space, nd, dims, typenum): return simple_new(space, nd, dims, typenum) - at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.VOIDP], PyObject) + at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.VOIDP], PyObject, header=HEADER) def _PyArray_SimpleNewFromData(space, nd, dims, typenum, data): return simple_new_from_data(space, nd, dims, typenum, data, owning=False) - at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.VOIDP], PyObject) + at cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.VOIDP], PyObject, header=HEADER) def _PyArray_SimpleNewFromDataOwning(space, nd, dims, typenum, data): # Variant to take over ownership of the memory, equivalent to: # PyObject *arr = PyArray_SimpleNewFromData(nd, dims, typenum, data); @@ -210,7 +211,7 @@ @cpython_api([rffi.VOIDP, Py_ssize_t, rffi.LONGP, Py_ssize_t, rffi.LONGP, - rffi.VOIDP, Py_ssize_t, Py_ssize_t, PyObject], PyObject) + rffi.VOIDP, Py_ssize_t, Py_ssize_t, PyObject], PyObject, header=HEADER) def _PyArray_New(space, subtype, nd, dims, typenum, strides, data, itemsize, 
flags, obj): if strides: raise OperationError(space.w_NotImplementedError, @@ -232,7 +233,7 @@ # a problem with casting function pointers? @cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, - rffi.CCHARP], PyObject) + rffi.CCHARP], PyObject, header=HEADER) def PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return, signature): w_signature = rffi.charp2str(signature) @@ -258,7 +259,7 @@ return ufunc_generic @cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, - Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t], PyObject) + Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t], PyObject, header=HEADER) def PyUFunc_FromFuncAndData(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return): w_signature = ','.join(['()'] * nin) + '->' + ','.join(['()'] * nout) diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -1,5 +1,6 @@ #include "Python.h" +#include "pypy_numpy.h" #include "numpy/arrayobject.h" #include /* memset, memcpy */ diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -281,7 +281,8 @@ return _PyArray_DescrFromType(typenum); """ ), - ], prologue='#include ') + ], prologue='''#include +#include ''') arr = mod.test_simplenew() assert arr.shape == (2, 3) assert arr.dtype.num == 11 #float32 dtype @@ -309,7 +310,8 @@ Py_INCREF(obj); return obj; '''), - ], prologue='#include ') + ], prologue='''#include +#include ''') array = ndarray((3, 4), dtype='d') assert mod.check_array(array) is array 
raises(TypeError, "mod.check_array(42)") @@ -353,6 +355,7 @@ """), ], prologue=''' #include "numpy/ndarraytypes.h" + #include "pypy_numpy.h" /*#include generated by numpy setup.py*/ typedef void (*PyUFuncGenericFunction) (char **args, From pypy.commits at gmail.com Mon Apr 11 17:56:41 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 11 Apr 2016 14:56:41 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-includes: typo Message-ID: <570c1d99.c856c20a.5399f.4b44@mx.google.com> Author: mattip Branch: cleanup-includes Changeset: r83607:f4521e08f5ac Date: 2016-04-11 23:24 +0300 http://bitbucket.org/pypy/pypy/changeset/f4521e08f5ac/ Log: typo diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1222,7 +1222,7 @@ PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **', getter_only=True, declare_as_extern=False) - for name, func in FUNCTIONS_BY_HEADER.iteritems(): + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): newname = mangle_name('PyPy', name) or name deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, From pypy.commits at gmail.com Tue Apr 12 05:47:56 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 12 Apr 2016 02:47:56 -0700 (PDT) Subject: [pypy-commit] pypy default: catching up with the backend changes load_from_gc_table. stuffed the gc references before the constant pool, which makes it easy to access them using r13 Message-ID: <570cc44c.8216c20a.8eda8.08bd@mx.google.com> Author: Richard Plangger Branch: Changeset: r83609:2fd0d166512f Date: 2016-04-12 11:08 +0200 http://bitbucket.org/pypy/pypy/changeset/2fd0d166512f/ Log: catching up with the backend changes load_from_gc_table. 
stuffed the gc references before the constant pool, which makes it easy to access them using r13 diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -12,7 +12,6 @@ from rpython.jit.metainterp.history import AbstractFailDescr from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper import rclass -from rpython.jit.backend.x86.arch import WORD from rpython.jit.backend.llsupport.symbolic import (WORD, get_array_token) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1,5 +1,5 @@ from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, - debug_bridge, DEBUG_COUNTER) + DEBUG_COUNTER) from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from rpython.jit.backend.llsupport import jitframe, rewrite from rpython.jit.backend.model import CompiledLoopToken @@ -78,7 +78,6 @@ self.mc = None self.pool = None - def target_arglocs(self, looptoken): return looptoken._zarch_arglocs @@ -131,28 +130,22 @@ def generate_quick_failure(self, guardtok): startpos = self.mc.currpos() - fail_descr, target = self.store_info_on_descr(startpos, guardtok) + faildescrindex, target = self.store_info_on_descr(startpos, guardtok) assert target != 0 - # POOL - #pool_offset = guardtok._pool_offset - #assert pool_offset != -1 - # overwrite the gcmap in the jitframe - #offset = pool_offset + RECOVERY_GCMAP_POOL_OFFSET - #self.mc.LG(r.SCRATCH2, l.pool(offset)) - ## overwrite the target in pool - #offset = pool_offset + RECOVERY_TARGET_POOL_OFFSET - ## overwrite!! 
- #self.pool.overwrite_64(self.mc, offset, target) - #self.mc.LG(r.r14, l.pool(offset)) + self.load_gcref_into(r.SCRATCH, faildescrindex) self.load_gcmap(self.mc, r.SCRATCH2, gcmap=guardtok.gcmap) self.mc.load_imm(r.r14, target) - self.mc.load_imm(r.SCRATCH, fail_descr) self.mc.BCR(c.ANY, r.r14) return startpos + def load_gcref_into(self, register, index): + topoff = index * WORD + size = self.pool.gcref_table_size + self.mc.LG(r.SCRATCH, l.addr(-size + topoff, r.POOL)) + def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): descr = self.cpu.gc_ll_descr.write_barrier_descr if descr is None: @@ -625,7 +618,6 @@ frame_info = self.datablockwrapper.malloc_aligned( jitframe.JITFRAMEINFO_SIZE, alignment=WORD) clt.frame_info = rffi.cast(jitframe.JITFRAMEINFOPTR, frame_info) - clt.allgcrefs = [] clt.frame_info.clear() # for now if log: @@ -634,10 +626,12 @@ regalloc = Regalloc(assembler=self) # + allgcrefs = [] operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) - self.pool.pre_assemble(self, operations) - entrypos = self.mc.get_relative_pos() + looptoken, allgcrefs) + # reserve_gcref_table is handled in pool + self.pool.pre_assemble(self, operations, allgcrefs) + functionpos = self.mc.get_relative_pos() self._call_header_with_stack_check() looppos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, @@ -645,7 +639,7 @@ self.update_frame_depth(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) # size_excluding_failure_stuff = self.mc.get_relative_pos() - #self.pool.post_assemble(self) + # self.write_pending_failure_recoveries() full_size = self.mc.get_relative_pos() # @@ -653,6 +647,8 @@ if not we_are_translated(): self.mc.trap() # should be never reached rawstart = self.materialize_loop(looptoken) + looptoken._ll_function_addr = rawstart + functionpos + self.patch_gcref_table(looptoken, rawstart) # looptoken._ll_loop_code = looppos + rawstart debug_start("jit-backend-addr") @@ 
-660,8 +656,15 @@ looptoken.number, loopname, r_uint(rawstart + looppos), r_uint(rawstart + size_excluding_failure_stuff), - r_uint(rawstart))) + r_uint(rawstart + functionpos))) + debug_print(" gc table: 0x%x" % r_uint(self.gc_table_addr)) + debug_print(" function: 0x%x" % r_uint(rawstart + functionpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + looppos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + + size_excluding_failure_stuff)) + debug_print(" end: 0x%x" % r_uint(rawstart + full_size)) debug_stop("jit-backend-addr") + # self.patch_pending_failure_recoveries(rawstart) # ops_offset = self.mc.ops_offset @@ -670,7 +673,6 @@ looptoken._zarch_rawstart = rawstart looptoken._zarch_fullsize = full_size looptoken._zarch_ops_offset = ops_offset - looptoken._ll_function_addr = rawstart + entrypos if logger: logger.log_loop(inputargs, operations, 0, "rewritten", name=loopname, ops_offset=ops_offset) @@ -683,7 +685,7 @@ # self.cpu.profile_agent.native_code_written(name, # rawstart, full_size) return AsmInfo(ops_offset, rawstart + looppos, - size_excluding_failure_stuff - looppos) + size_excluding_failure_stuff - looppos, rawstart) @rgc.no_release_gil def assemble_bridge(self, faildescr, inputargs, operations, @@ -700,23 +702,34 @@ arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = Regalloc(assembler=self) + allgcrefs = [] operations = regalloc.prepare_bridge(inputargs, arglocs, - operations, - self.current_clt.allgcrefs, + operations, allgcrefs, self.current_clt.frame_info) - self.pool.pre_assemble(self, operations, bridge=True) + self.pool.pre_assemble(self, operations, all_gcrefs, bridge=True) startpos = self.mc.get_relative_pos() self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - startpos)) self._check_frame_depth(self.mc, regalloc.get_gcmap()) + bridgestartpos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() 
#self.pool.post_assemble(self) self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() # + rawstart = self.materialize_loop(original_loop_token) + self.patch_gcref_table(original_loop_token, rawstart) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) - rawstart = self.materialize_loop(original_loop_token) - debug_bridge(descr_number, rawstart, codeendpos) + debug_start("jit-backend-addr") + debug_print("bridge out of Guard 0x%x has address 0x%x to 0x%x" % + (r_uint(descr_number), r_uint(rawstart + startpos), + r_uint(rawstart + codeendpos))) + debug_print(" gc table: 0x%x" % r_uint(self.gc_table_addr)) + debug_print(" jump target: 0x%x" % r_uint(rawstart + startpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + bridgestartpos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + codeendpos)) + debug_print(" end: 0x%x" % r_uint(rawstart + fullsize)) + debug_stop("jit-backend-addr") self.patch_pending_failure_recoveries(rawstart) # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart + startpos) @@ -729,7 +742,22 @@ self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) self.teardown() - return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) + return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos, + rawstart + bridgestartpos) + + def patch_gcref_table(self, looptoken, rawstart): + self.gc_table_addr = rawstart + tracer = self.cpu.gc_ll_descr.make_gcref_tracer(rawstart, + self._allgcrefs) + gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) + gcreftracers.append(tracer) # keepalive + self.teardown_gcrefs_list() + + def get_asmmemmgr_gcreftracers(self, looptoken): + clt = looptoken.compiled_loop_token + if clt.asmmemmgr_gcreftracers is None: + clt.asmmemmgr_gcreftracers = [] + return clt.asmmemmgr_gcreftracers def patch_jump_for_descr(self, faildescr, adr_new_target): # 'faildescr.adr_jump_offset' is the address of an instruction that is 
a @@ -996,10 +1024,6 @@ for tok in self.pending_guard_tokens: addr = rawstart + tok.pos_jump_offset # - # POOL - #tok.faildescr.adr_jump_offset = rawstart + \ - # self.pool.pool_start + tok._pool_offset + \ - # RECOVERY_TARGET_POOL_OFFSET tok.faildescr.adr_jump_offset = rawstart + tok.pos_recovery_stub relative_target = tok.pos_recovery_stub - tok.pos_jump_offset # @@ -1199,11 +1223,6 @@ # to be executed, thus remove the first opcode self.mc.b_offset(descr._ll_loop_code + self.mc.LARL_byte_count) else: - # POOL - #offset = self.pool.get_descr_offset(descr) + \ - # JUMPABS_TARGET_ADDR__POOL_OFFSET - #self.mc.LG(r.SCRATCH, l.pool(offset)) - #self.pool.overwrite_64(self.mc, offset, descr._ll_loop_code) self.mc.load_imm(r.SCRATCH, descr._ll_loop_code) self.mc.BCR(c.ANY, r.SCRATCH) @@ -1211,8 +1230,8 @@ def emit_finish(self, op, arglocs, regalloc): base_ofs = self.cpu.get_baseofs_of_frame_field() - if len(arglocs) > 1: - [return_val, fail_descr_loc] = arglocs + if len(arglocs) > 0: + [return_val] = arglocs if op.getarg(0).type == FLOAT: if return_val.is_in_pool(): self.mc.LDY(r.FP_SCRATCH, return_val) @@ -1223,8 +1242,6 @@ self.mc.LG(r.SCRATCH, return_val) return_val = r.SCRATCH self.mc.STG(return_val, l.addr(base_ofs, r.SPP)) - else: - [fail_descr_loc] = arglocs ofs = self.cpu.get_ofs_of_frame_field('jf_descr') ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') @@ -1247,7 +1264,9 @@ gcmap = lltype.nullptr(jitframe.GCMAP) self.load_gcmap(self.mc, r.r9, gcmap) - self.mc.load_imm(r.r10, fail_descr_loc.getint()) + descr = op.getdescr() + faildescrindex = self.get_gcref_from_faildescr(descr) + self.load_gcref_into(r.r10, faildescrindex) self.mc.STG(r.r9, l.addr(ofs2, r.SPP)) self.mc.STG(r.r10, l.addr(ofs, r.SPP)) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -31,9 +31,9 @@ class ZARCHGuardToken(GuardToken): def __init__(self, 
cpu, gcmap, descr, failargs, faillocs, - guard_opnum, frame_depth, fcond=c.cond_none): + guard_opnum, frame_depth, faildescrindex, fcond=c.cond_none): GuardToken.__init__(self, cpu, gcmap, descr, failargs, faillocs, - guard_opnum, frame_depth) + guard_opnum, frame_depth, faildescrindex) self.fcond = fcond class AbstractZARCHBuilder(object): diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -632,12 +632,19 @@ def build_guard_token(self, op, frame_depth, arglocs, fcond): descr = op.getdescr() gcmap = allocate_gcmap(self, frame_depth, r.JITFRAME_FIXED_SIZE) + faildescrindex = self.get_gcref_from_faildescr(descr) token = ZARCHGuardToken(self.cpu, gcmap, descr, op.getfailargs(), arglocs, op.getopnum(), frame_depth, - fcond) + faildescrindex, fcond) #token._pool_offset = self.pool.get_descr_offset(descr) return token + def emit_load_from_gc_table(self, op, arglocs, regalloc): + resloc, = arglocs + index = op.getarg(0).getint() + assert isinstance(resloc, RegLoc) + self.load_gcref_into(resloc, index) + def emit_guard_true(self, op, arglocs, regalloc): self._emit_guard(op, arglocs) diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -99,21 +99,23 @@ self.size = val assert val >= 0 - def pre_assemble(self, asm, operations, bridge=False): - # O(len(operations)). I do not think there is a way - # around this. - # + def pre_assemble(self, asm, operations, allgcrefs, bridge=False): # Problem: # constants such as floating point operations, plain pointers, # or integers might serve as parameter to an operation. thus - # it must be loaded into a register. There is a space benefit - # for 64-bit integers, or python floats, when a constant is used - # twice. + # it must be loaded into a register. 
Loading them from immediate + # takes quite long and slows down the resulting JIT code. + # There is a space benefit for 64-bit integers/doubles used twice. # - # Solution: - # the current solution (gcc does the same), use a literal pool - # located at register r13. This one can easily offset with 20 - # bit signed values (should be enough) + # creates the table for gc references here + self.gc_table_addr = asm.mc.get_relative_pos() + self.gcref_table_size = len(allgcrefs) * WORD + mc = asm.mc + assert mc.get_relative_pos() == 0 + for i in range(self.gcref_table_size): + mc.writechar('\x00') + asm.setup_gcrefs_list(allgcrefs) + self.pool_start = asm.mc.get_relative_pos() for op in operations: self.ensure_can_hold_constants(asm, op) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1215,18 +1215,16 @@ src_locations2, dst_locations2, fptmploc, WORD) return [] + def prepare_load_from_gc_table(self, op): + resloc = self.rm.ensure_reg(op) + return [resloc] + def prepare_finish(self, op): - descr = op.getdescr() - fail_descr = cast_instance_to_gcref(descr) - # we know it does not move, but well - rgc._make_sure_does_not_move(fail_descr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) - assert fail_descr > 0 if op.numargs() > 0: loc = self.ensure_reg(op.getarg(0)) - locs = [loc, imm(fail_descr)] + locs = [loc] else: - locs = [imm(fail_descr)] + locs = [] return locs def notimplemented(self, op): From pypy.commits at gmail.com Tue Apr 12 05:47:58 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 12 Apr 2016 02:47:58 -0700 (PDT) Subject: [pypy-commit] pypy default: wrong parameter to LG command and wrong jump target to bridge Message-ID: <570cc44e.c818c20a.51aa1.02f8@mx.google.com> Author: Richard Plangger Branch: Changeset: r83610:b55e5e29e2f0 Date: 2016-04-12 11:47 +0200 http://bitbucket.org/pypy/pypy/changeset/b55e5e29e2f0/ 
Log: wrong parameter to LG command and wrong jump target to bridge diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -144,7 +144,7 @@ def load_gcref_into(self, register, index): topoff = index * WORD size = self.pool.gcref_table_size - self.mc.LG(r.SCRATCH, l.addr(-size + topoff, r.POOL)) + self.mc.LG(register, l.addr(-size + topoff, r.POOL)) def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): descr = self.cpu.gc_ll_descr.write_barrier_descr @@ -706,11 +706,11 @@ operations = regalloc.prepare_bridge(inputargs, arglocs, operations, allgcrefs, self.current_clt.frame_info) - self.pool.pre_assemble(self, operations, all_gcrefs, bridge=True) - startpos = self.mc.get_relative_pos() - self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - startpos)) + startpos = len(allgcrefs) * WORD + self.pool.pre_assemble(self, operations, allgcrefs, bridge=True) self._check_frame_depth(self.mc, regalloc.get_gcmap()) bridgestartpos = self.mc.get_relative_pos() + self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - bridgestartpos)) frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() #self.pool.post_assemble(self) @@ -732,7 +732,7 @@ debug_stop("jit-backend-addr") self.patch_pending_failure_recoveries(rawstart) # patch the jump from original guard - self.patch_jump_for_descr(faildescr, rawstart + startpos) + self.patch_jump_for_descr(faildescr, rawstart + bridgestartpos) ops_offset = self.mc.ops_offset frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -642,7 +642,7 @@ def emit_load_from_gc_table(self, op, arglocs, 
regalloc): resloc, = arglocs index = op.getarg(0).getint() - assert isinstance(resloc, RegLoc) + assert resloc.is_reg() self.load_gcref_into(resloc, index) def emit_guard_true(self, op, arglocs, regalloc): diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1216,7 +1216,7 @@ return [] def prepare_load_from_gc_table(self, op): - resloc = self.rm.ensure_reg(op) + resloc = self.rm.force_allocate_reg(op) return [resloc] def prepare_finish(self, op): From pypy.commits at gmail.com Tue Apr 12 07:46:40 2016 From: pypy.commits at gmail.com (amauryfa) Date: Tue, 12 Apr 2016 04:46:40 -0700 (PDT) Subject: [pypy-commit] pypy default: cpyext: Move header logic to .h files, and remove the #ifdef guards, these _decl.h files are truly internal. Message-ID: <570ce020.41d91c0a.7a2b6.ffff82db@mx.google.com> Author: Amaury Forgeot d'Arc Branch: Changeset: r83611:b66575a3eea9 Date: 2016-04-12 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/b66575a3eea9/ Log: cpyext: Move header logic to .h files, and remove the #ifdef guards, these _decl.h files are truly internal. Also skip the declaration of structmember_decl.h when compiling PyPy itself. (declarations generated in forwarddecl.h sometimes differ a bit, e.g. "const char*" is replaced by "char*) This will be important when PyMember_Get() is changed to accept a "const char*". 
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1001,12 +1001,6 @@ functions = [] decls = {} pypy_decls = decls['pypy_decl.h'] = [] - pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#ifndef PYPY_STANDALONE\n") - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("extern \"C\" {") - pypy_decls.append("#endif\n") pypy_decls.append('#define Signed long /* xxx temporary fix */\n') pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') @@ -1047,11 +1041,6 @@ pypy_decls.append('#undef Signed /* xxx temporary fix */\n') pypy_decls.append('#undef Unsigned /* xxx temporary fix */\n') - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("}") - pypy_decls.append("#endif") - pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") - pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") for header_name, header_decls in decls.iteritems(): decl_h = udir.join(header_name) diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -132,7 +132,18 @@ /* Missing definitions */ #include "missing.h" -#include +/* The declarations of most API functions are generated in a separate file */ +/* Don't include them while building PyPy, RPython also generated signatures + * which are similar but not identical. */ +#ifndef PYPY_STANDALONE +#ifdef __cplusplus +extern "C" { +#endif + #include +#ifdef __cplusplus +} +#endif +#endif /* PYPY_STANDALONE */ /* Define macros for inline documentation. */ #define PyDoc_VAR(name) static char name[] diff --git a/pypy/module/cpyext/include/structmember.h b/pypy/module/cpyext/include/structmember.h --- a/pypy/module/cpyext/include/structmember.h +++ b/pypy/module/cpyext/include/structmember.h @@ -78,7 +78,11 @@ /* API functions. 
*/ +/* Don't include them while building PyPy, RPython also generated signatures + * which are similar but not identical. */ +#ifndef PYPY_STANDALONE #include "pypy_structmember_decl.h" +#endif #ifdef __cplusplus From pypy.commits at gmail.com Tue Apr 12 07:46:42 2016 From: pypy.commits at gmail.com (amauryfa) Date: Tue, 12 Apr 2016 04:46:42 -0700 (PDT) Subject: [pypy-commit] pypy default: cpyext: Fix the signature of PyMember_GetOne, it takes a const char* Message-ID: <570ce022.06d8c20a.38efb.34f0@mx.google.com> Author: Amaury Forgeot d'Arc Branch: Changeset: r83612:17abc9ee7546 Date: 2016-04-12 13:44 +0200 http://bitbucket.org/pypy/pypy/changeset/17abc9ee7546/ Log: cpyext: Fix the signature of PyMember_GetOne, it takes a const char* (even if everybody passes PyObject*) diff --git a/pypy/module/cpyext/structmember.py b/pypy/module/cpyext/structmember.py --- a/pypy/module/cpyext/structmember.py +++ b/pypy/module/cpyext/structmember.py @@ -2,7 +2,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.structmemberdefs import * -from pypy.module.cpyext.api import ADDR, PyObjectP, cpython_api +from pypy.module.cpyext.api import ADDR, PyObjectP, cpython_api, CONST_STRING from pypy.module.cpyext.intobject import PyInt_AsLong, PyInt_AsUnsignedLong from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, from_ref, make_ref @@ -34,7 +34,7 @@ _HEADER = 'pypy_structmember_decl.h' - at cpython_api([PyObject, lltype.Ptr(PyMemberDef)], PyObject, header=_HEADER) + at cpython_api([CONST_STRING, lltype.Ptr(PyMemberDef)], PyObject, header=_HEADER) def PyMember_GetOne(space, obj, w_member): addr = rffi.cast(ADDR, obj) addr += w_member.c_offset @@ -85,7 +85,7 @@ return w_result - at cpython_api([PyObject, lltype.Ptr(PyMemberDef), PyObject], rffi.INT_real, + at cpython_api([rffi.CCHARP, lltype.Ptr(PyMemberDef), PyObject], rffi.INT_real, 
error=-1, header=_HEADER) def PyMember_SetOne(space, obj, w_member, w_value): addr = rffi.cast(ADDR, obj) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -271,17 +271,32 @@ def member_getter(self, space, w_self): assert isinstance(self, W_MemberDescr) check_descr(space, w_self, self.w_type) - return PyMember_GetOne(space, w_self, self.member) + pyref = make_ref(space, w_self) + try: + return PyMember_GetOne( + space, rffi.cast(rffi.CCHARP, pyref), self.member) + finally: + Py_DecRef(space, pyref) def member_delete(self, space, w_self): assert isinstance(self, W_MemberDescr) check_descr(space, w_self, self.w_type) - PyMember_SetOne(space, w_self, self.member, None) + pyref = make_ref(space, w_self) + try: + PyMember_SetOne( + space, rffi.cast(rffi.CCHARP, pyref), self.member, None) + finally: + Py_DecRef(space, pyref) def member_setter(self, space, w_self, w_value): assert isinstance(self, W_MemberDescr) check_descr(space, w_self, self.w_type) - PyMember_SetOne(space, w_self, self.member, w_value) + pyref = make_ref(space, w_self) + try: + PyMember_SetOne( + space, rffi.cast(rffi.CCHARP, pyref), self.member, w_value) + finally: + Py_DecRef(space, pyref) class W_PyCTypeObject(W_TypeObject): @jit.dont_look_inside From pypy.commits at gmail.com Tue Apr 12 08:01:43 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 12 Apr 2016 05:01:43 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: catchup with default Message-ID: <570ce3a7.8673c20a.c9221.4263@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83613:03ed7b8b8776 Date: 2016-04-12 14:00 +0200 http://bitbucket.org/pypy/pypy/changeset/03ed7b8b8776/ Log: catchup with default diff too long, truncating to 2000 out of 3747 lines diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -35,3 +35,20 @@ 
.. branch: win32-lib-name +.. branch: remove-frame-forcing-in-executioncontext + +.. branch: rposix-for-3 + +Wrap more POSIX functions in `rpython.rlib.rposix`. + +.. branch: cleanup-history-rewriting + +A local clean-up in the JIT front-end. + +.. branch: jit-constptr-2 + +Remove the forced minor collection that occurs when rewriting the +assembler at the start of the JIT backend. This is done by emitting +the ConstPtrs in a separate table, and loading from the table. It +gives improved warm-up time and memory usage, and also removes +annoying special-purpose code for pinned pointers. diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -277,9 +277,18 @@ raise NotImplementedError def get_traceback(self): - """Get the PyTraceback object, for app-level Python code. + """Calling this marks the PyTraceback as escaped, i.e. it becomes + accessible and inspectable by app-level Python code. For the JIT. + Note that this has no effect if there are already several traceback + frames recorded, because in this case they are already marked as + escaping by executioncontext.leave() being called with + got_exception=True. 
""" - return self._application_traceback + from pypy.interpreter.pytraceback import PyTraceback + tb = self._application_traceback + if tb is not None and isinstance(tb, PyTraceback): + tb.frame.mark_as_escaped() + return tb def set_traceback(self, traceback): """Set the current traceback.""" diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -74,6 +74,15 @@ finally: frame_vref = self.topframeref self.topframeref = frame.f_backref + if frame.escaped or got_exception: + # if this frame escaped to applevel, we must ensure that also + # f_back does + f_back = frame.f_backref() + if f_back: + f_back.mark_as_escaped() + # force the frame (from the JIT point of view), so that it can + # be accessed also later + frame_vref() jit.virtual_ref_finish(frame_vref, frame) # ________________________________________________________________ diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -65,6 +65,7 @@ last_exception = None f_backref = jit.vref_None + escaped = False # see mark_as_escaped() debugdata = None pycode = None # code object executed by that frame @@ -151,6 +152,15 @@ assert isinstance(cell, Cell) return cell + def mark_as_escaped(self): + """ + Must be called on frames that are exposed to applevel, e.g. by + sys._getframe(). This ensures that the virtualref holding the frame + is properly forced by ec.leave(), and thus the frame will be still + accessible even after the corresponding C stack died. 
+ """ + self.escaped = True + def append_block(self, block): assert block.previous is self.lastblock self.lastblock = block diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -53,7 +53,7 @@ assert _promote(1) == 1 assert _promote(1.1) == 1.1 assert _promote("abc") == "abc" - assert _promote(u"abc") == u"abc" + raises(TypeError, _promote, u"abc") l = [] assert _promote(l) is l class A(object): diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -285,6 +285,8 @@ from posix import openpty, fdopen, write, close except ImportError: skip('no openpty on this platform') + if 'gnukfreebsd' in sys.platform: + skip('close() hangs forever on kFreeBSD') read_fd, write_fd = openpty() write(write_fd, 'Abc\n') close(write_fd) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -733,6 +733,7 @@ try: while 1: count += cli.send(b'foobar' * 70) + assert count < 100000 except timeout: pass t.recv(count) diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -14,7 +14,7 @@ tmpfile2 = open(self.tmpfilename2, 'wb') tmpfileno2 = tmpfile2.fileno() - import struct, sys + import struct, sys, gc WORD = struct.calcsize('l') @@ -46,6 +46,8 @@ return count import _vmprof + gc.collect() # try to make the weakref list deterministic + gc.collect() # by freeing all dead code objects _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() @@ -57,6 +59,8 @@ pass """ in d + gc.collect() + gc.collect() _vmprof.enable(tmpfileno2, 0.01) 
exec """def foo2(): diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1001,12 +1001,6 @@ functions = [] decls = {} pypy_decls = decls['pypy_decl.h'] = [] - pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#ifndef PYPY_STANDALONE\n") - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("extern \"C\" {") - pypy_decls.append("#endif\n") pypy_decls.append('#define Signed long /* xxx temporary fix */\n') pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') @@ -1047,11 +1041,6 @@ pypy_decls.append('#undef Signed /* xxx temporary fix */\n') pypy_decls.append('#undef Unsigned /* xxx temporary fix */\n') - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("}") - pypy_decls.append("#endif") - pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") - pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") for header_name, header_decls in decls.iteritems(): decl_h = udir.join(header_name) diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -132,7 +132,18 @@ /* Missing definitions */ #include "missing.h" -#include +/* The declarations of most API functions are generated in a separate file */ +/* Don't include them while building PyPy, RPython also generated signatures + * which are similar but not identical. */ +#ifndef PYPY_STANDALONE +#ifdef __cplusplus +extern "C" { +#endif + #include +#ifdef __cplusplus +} +#endif +#endif /* PYPY_STANDALONE */ /* Define macros for inline documentation. */ #define PyDoc_VAR(name) static char name[] diff --git a/pypy/module/cpyext/include/structmember.h b/pypy/module/cpyext/include/structmember.h --- a/pypy/module/cpyext/include/structmember.h +++ b/pypy/module/cpyext/include/structmember.h @@ -78,7 +78,11 @@ /* API functions. 
*/ +/* Don't include them while building PyPy, RPython also generated signatures + * which are similar but not identical. */ +#ifndef PYPY_STANDALONE #include "pypy_structmember_decl.h" +#endif #ifdef __cplusplus diff --git a/pypy/module/cpyext/structmember.py b/pypy/module/cpyext/structmember.py --- a/pypy/module/cpyext/structmember.py +++ b/pypy/module/cpyext/structmember.py @@ -2,7 +2,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.structmemberdefs import * -from pypy.module.cpyext.api import ADDR, PyObjectP, cpython_api +from pypy.module.cpyext.api import ADDR, PyObjectP, cpython_api, CONST_STRING from pypy.module.cpyext.intobject import PyInt_AsLong, PyInt_AsUnsignedLong from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, from_ref, make_ref @@ -34,7 +34,7 @@ _HEADER = 'pypy_structmember_decl.h' - at cpython_api([PyObject, lltype.Ptr(PyMemberDef)], PyObject, header=_HEADER) + at cpython_api([CONST_STRING, lltype.Ptr(PyMemberDef)], PyObject, header=_HEADER) def PyMember_GetOne(space, obj, w_member): addr = rffi.cast(ADDR, obj) addr += w_member.c_offset @@ -85,7 +85,7 @@ return w_result - at cpython_api([PyObject, lltype.Ptr(PyMemberDef), PyObject], rffi.INT_real, + at cpython_api([rffi.CCHARP, lltype.Ptr(PyMemberDef), PyObject], rffi.INT_real, error=-1, header=_HEADER) def PyMember_SetOne(space, obj, w_member, w_value): addr = rffi.cast(ADDR, obj) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -271,17 +271,32 @@ def member_getter(self, space, w_self): assert isinstance(self, W_MemberDescr) check_descr(space, w_self, self.w_type) - return PyMember_GetOne(space, w_self, self.member) + pyref = make_ref(space, w_self) + try: + return PyMember_GetOne( + space, rffi.cast(rffi.CCHARP, pyref), 
self.member) + finally: + Py_DecRef(space, pyref) def member_delete(self, space, w_self): assert isinstance(self, W_MemberDescr) check_descr(space, w_self, self.w_type) - PyMember_SetOne(space, w_self, self.member, None) + pyref = make_ref(space, w_self) + try: + PyMember_SetOne( + space, rffi.cast(rffi.CCHARP, pyref), self.member, None) + finally: + Py_DecRef(space, pyref) def member_setter(self, space, w_self, w_value): assert isinstance(self, W_MemberDescr) check_descr(space, w_self, self.w_type) - PyMember_SetOne(space, w_self, self.member, w_value) + pyref = make_ref(space, w_self) + try: + PyMember_SetOne( + space, rffi.cast(rffi.CCHARP, pyref), self.member, w_value) + finally: + Py_DecRef(space, pyref) class W_PyCTypeObject(W_TypeObject): @jit.dont_look_inside diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -37,6 +37,7 @@ raise OperationError(space.w_ValueError, space.wrap("call stack is not deep enough")) if depth == 0: + f.mark_as_escaped() return space.wrap(f) depth -= 1 f = ec.getnextframe_nohidden(f) diff --git a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py b/pypy/module/test_lib_pypy/pyrepl/infrastructure.py --- a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py +++ b/pypy/module/test_lib_pypy/pyrepl/infrastructure.py @@ -18,6 +18,9 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
from __future__ import print_function +from contextlib import contextmanager +import os + from pyrepl.reader import Reader from pyrepl.console import Console, Event @@ -71,3 +74,14 @@ con = TestConsole(test_spec, verbose=True) reader = reader_class(con) reader.readline() + + + at contextmanager +def sane_term(): + """Ensure a TERM that supports clear""" + old_term, os.environ['TERM'] = os.environ.get('TERM'), 'xterm' + yield + if old_term is not None: + os.environ['TERM'] = old_term + else: + del os.environ['TERM'] diff --git a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py --- a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py @@ -18,7 +18,7 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from pyrepl.historical_reader import HistoricalReader -from .infrastructure import EA, BaseTestReader, read_spec +from .infrastructure import EA, BaseTestReader, sane_term, read_spec # this test case should contain as-verbatim-as-possible versions of # (applicable) bug reports @@ -46,7 +46,8 @@ read_spec(spec, HistoricalTestReader) - at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") + at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") def test_signal_failure(monkeypatch): import os import pty @@ -61,13 +62,14 @@ mfd, sfd = pty.openpty() try: - c = UnixConsole(sfd, sfd) - c.prepare() - c.restore() - monkeypatch.setattr(signal, 'signal', failing_signal) - c.prepare() - monkeypatch.setattr(signal, 'signal', really_failing_signal) - c.restore() + with sane_term(): + c = UnixConsole(sfd, sfd) + c.prepare() + c.restore() + monkeypatch.setattr(signal, 'signal', failing_signal) + c.prepare() + monkeypatch.setattr(signal, 'signal', really_failing_signal) + c.restore() finally: os.close(mfd) os.close(sfd) diff --git a/pypy/module/test_lib_pypy/pyrepl/test_readline.py 
b/pypy/module/test_lib_pypy/pyrepl/test_readline.py --- a/pypy/module/test_lib_pypy/pyrepl/test_readline.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_readline.py @@ -1,7 +1,10 @@ import pytest +from .infrastructure import sane_term - at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") + + at pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") def test_raw_input(): import os import pty @@ -11,7 +14,8 @@ readline_wrapper = _ReadlineWrapper(slave, slave) os.write(master, b'input\n') - result = readline_wrapper.get_reader().readline() + with sane_term(): + result = readline_wrapper.get_reader().readline() #result = readline_wrapper.raw_input('prompt:') assert result == 'input' # A bytes string on python2, a unicode string on python3. diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ # hypothesis is used for test generation on untranslated jit tests hypothesis -enum>=0.4.6 # is a dependency, but old pip does not pick it up enum34>=1.1.2 diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -77,6 +77,7 @@ for c in s: buf.append(c) buf.append(' ') +rpython_print_item._annenforceargs_ = (str,) def rpython_print_newline(): buf = stdoutbuffer.linebuf diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -14,7 +14,7 @@ CoreRegisterManager, check_imm_arg, VFPRegisterManager, operations as regalloc_operations) from rpython.jit.backend.llsupport import jitframe, rewrite -from rpython.jit.backend.llsupport.assembler import DEBUG_COUNTER, debug_bridge, BaseAssembler +from rpython.jit.backend.llsupport.assembler import DEBUG_COUNTER, BaseAssembler from rpython.jit.backend.llsupport.regalloc import get_scale, 
valid_addressing_size from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from rpython.jit.backend.model import CompiledLoopToken @@ -481,8 +481,9 @@ def generate_quick_failure(self, guardtok): startpos = self.mc.currpos() - fail_descr, target = self.store_info_on_descr(startpos, guardtok) - self.regalloc_push(imm(fail_descr)) + faildescrindex, target = self.store_info_on_descr(startpos, guardtok) + self.load_from_gc_table(r.ip.value, faildescrindex) + self.regalloc_push(r.ip) self.push_gcmap(self.mc, gcmap=guardtok.gcmap, push=True) self.mc.BL(target) return startpos @@ -556,7 +557,7 @@ debug_stop('jit-backend-ops') def _call_header(self): - assert self.mc.currpos() == 0 + # there is the gc table before this point self.gen_func_prolog() def _call_header_with_stack_check(self): @@ -596,20 +597,22 @@ frame_info = self.datablockwrapper.malloc_aligned( jitframe.JITFRAMEINFO_SIZE, alignment=WORD) clt.frame_info = rffi.cast(jitframe.JITFRAMEINFOPTR, frame_info) - clt.allgcrefs = [] clt.frame_info.clear() # for now if log: operations = self._inject_debugging_code(looptoken, operations, 'e', looptoken.number) + regalloc = Regalloc(assembler=self) + allgcrefs = [] + operations = regalloc.prepare_loop(inputargs, operations, looptoken, + allgcrefs) + self.reserve_gcref_table(allgcrefs) + functionpos = self.mc.get_relative_pos() + self._call_header_with_stack_check() self._check_frame_depth_debug(self.mc) - regalloc = Regalloc(assembler=self) - operations = regalloc.prepare_loop(inputargs, operations, looptoken, - clt.allgcrefs) - loop_head = self.mc.get_relative_pos() looptoken._ll_loop_code = loop_head # @@ -620,9 +623,11 @@ self.write_pending_failure_recoveries() + full_size = self.mc.get_relative_pos() rawstart = self.materialize_loop(looptoken) - looptoken._function_addr = looptoken._ll_function_addr = rawstart + looptoken._ll_function_addr = rawstart + functionpos + self.patch_gcref_table(looptoken, rawstart) self.process_pending_guards(rawstart) 
self.fixup_target_tokens(rawstart) @@ -641,7 +646,13 @@ looptoken.number, loopname, r_uint(rawstart + loop_head), r_uint(rawstart + size_excluding_failure_stuff), - r_uint(rawstart))) + r_uint(rawstart + functionpos))) + debug_print(" gc table: 0x%x" % r_uint(rawstart)) + debug_print(" function: 0x%x" % r_uint(rawstart + functionpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + loop_head)) + debug_print(" failures: 0x%x" % r_uint(rawstart + + size_excluding_failure_stuff)) + debug_print(" end: 0x%x" % r_uint(rawstart + full_size)) debug_stop("jit-backend-addr") return AsmInfo(ops_offset, rawstart + loop_head, @@ -678,27 +689,43 @@ arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = Regalloc(assembler=self) - startpos = self.mc.get_relative_pos() + allgcrefs = [] operations = regalloc.prepare_bridge(inputargs, arglocs, operations, - self.current_clt.allgcrefs, + allgcrefs, self.current_clt.frame_info) + self.reserve_gcref_table(allgcrefs) + startpos = self.mc.get_relative_pos() self._check_frame_depth(self.mc, regalloc.get_gcmap()) + bridgestartpos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() self.write_pending_failure_recoveries() + fullsize = self.mc.get_relative_pos() rawstart = self.materialize_loop(original_loop_token) + self.patch_gcref_table(original_loop_token, rawstart) self.process_pending_guards(rawstart) + debug_start("jit-backend-addr") + debug_print("bridge out of Guard 0x%x has address 0x%x to 0x%x" % + (r_uint(descr_number), r_uint(rawstart + startpos), + r_uint(rawstart + codeendpos))) + debug_print(" gc table: 0x%x" % r_uint(rawstart)) + debug_print(" jump target: 0x%x" % r_uint(rawstart + startpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + bridgestartpos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + codeendpos)) + debug_print(" end: 0x%x" % r_uint(rawstart + fullsize)) + debug_stop("jit-backend-addr") + 
# patch the jump from original guard self.patch_trace(faildescr, original_loop_token, - rawstart, regalloc) + rawstart + startpos, regalloc) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) @@ -716,9 +743,53 @@ ops_offset=ops_offset) self.teardown() - debug_bridge(descr_number, rawstart, codeendpos) + return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) - return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) + def reserve_gcref_table(self, allgcrefs): + gcref_table_size = len(allgcrefs) * WORD + # align to a multiple of 16 and reserve space at the beginning + # of the machine code for the gc table. This lets us write + # machine code with relative addressing (see load_from_gc_table()) + gcref_table_size = (gcref_table_size + 15) & ~15 + mc = self.mc + assert mc.get_relative_pos() == 0 + for i in range(gcref_table_size): + mc.writechar('\x00') + self.setup_gcrefs_list(allgcrefs) + + def patch_gcref_table(self, looptoken, rawstart): + # the gc table is at the start of the machine code. Fill it now + tracer = self.cpu.gc_ll_descr.make_gcref_tracer(rawstart, + self._allgcrefs) + gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) + gcreftracers.append(tracer) # keepalive + self.teardown_gcrefs_list() + + def load_from_gc_table(self, regnum, index): + """emits either: + LDR Rt, [PC, #offset] if -4095 <= offset + or: + gen_load_int(Rt, offset) + LDR Rt, [PC, Rt] for larger offsets + """ + mc = self.mc + address_in_buffer = index * WORD # at the start of the buffer + offset = address_in_buffer - (mc.get_relative_pos() + 8) # negative + if offset >= -4095: + mc.LDR_ri(regnum, r.pc.value, offset) + else: + # The offset we're loading is negative: right now, + # gen_load_int() will always use exactly + # get_max_size_of_gen_load_int() instructions. No point + # in optimizing in case we get less. Just in case though, + # we check and pad with nops. 
+ extra_bytes = mc.get_max_size_of_gen_load_int() * 2 + offset -= extra_bytes + start = mc.get_relative_pos() + mc.gen_load_int(regnum, offset) + while mc.get_relative_pos() != start + extra_bytes: + mc.NOP() + mc.LDR_rr(regnum, r.pc.value, regnum) def new_stack_loc(self, i, tp): base_ofs = self.cpu.get_baseofs_of_frame_field() @@ -929,6 +1000,12 @@ clt.asmmemmgr_blocks = [] return clt.asmmemmgr_blocks + def get_asmmemmgr_gcreftracers(self, looptoken): + clt = looptoken.compiled_loop_token + if clt.asmmemmgr_gcreftracers is None: + clt.asmmemmgr_gcreftracers = [] + return clt.asmmemmgr_gcreftracers + def _walk_operations(self, inputargs, operations, regalloc): fcond = c.AL self._regalloc = regalloc diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -35,9 +35,9 @@ class ArmGuardToken(GuardToken): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - offset, guard_opnum, frame_depth, fcond=c.AL): + offset, guard_opnum, frame_depth, faildescrindex, fcond=c.AL): GuardToken.__init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - guard_opnum, frame_depth) + guard_opnum, frame_depth, faildescrindex) self.fcond = fcond self.offset = offset @@ -178,6 +178,7 @@ assert isinstance(descr, AbstractFailDescr) gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + faildescrindex = self.get_gcref_from_faildescr(descr) token = ArmGuardToken(self.cpu, gcmap, descr, failargs=op.getfailargs(), @@ -185,6 +186,7 @@ offset=offset, guard_opnum=op.getopnum(), frame_depth=frame_depth, + faildescrindex=faildescrindex, fcond=fcond) return token @@ -398,14 +400,13 @@ def emit_op_finish(self, op, arglocs, regalloc, fcond): base_ofs = self.cpu.get_baseofs_of_frame_field() - if len(arglocs) == 2: - [return_val, fail_descr_loc] = arglocs + if len(arglocs) > 0: + [return_val] = arglocs self.store_reg(self.mc, return_val, r.fp, base_ofs) - 
else: - [fail_descr_loc] = arglocs ofs = self.cpu.get_ofs_of_frame_field('jf_descr') - self.mc.gen_load_int(r.ip.value, fail_descr_loc.value) + faildescrindex = self.get_gcref_from_faildescr(op.getdescr()) + self.load_from_gc_table(r.ip.value, faildescrindex) # XXX self.mov(fail_descr_loc, RawStackLoc(ofs)) self.store_reg(self.mc, r.ip, r.fp, ofs, helper=r.lr) if op.numargs() > 0 and op.getarg(0).type == REF: @@ -1035,9 +1036,9 @@ assert (guard_op.getopnum() == rop.GUARD_NOT_FORCED or guard_op.getopnum() == rop.GUARD_NOT_FORCED_2) faildescr = guard_op.getdescr() + faildescrindex = self.get_gcref_from_faildescr(faildescr) ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - value = rffi.cast(lltype.Signed, cast_instance_to_gcref(faildescr)) - self.mc.gen_load_int(r.ip.value, value) + self.load_from_gc_table(r.ip.value, faildescrindex) self.store_reg(self.mc, r.ip, r.fp, ofs) def _find_nearby_operation(self, delta): @@ -1250,3 +1251,9 @@ self._load_from_mem(res_loc, res_loc, ofs_loc, imm(scale), signed, fcond) return fcond + + def emit_op_load_from_gc_table(self, op, arglocs, regalloc, fcond): + res_loc, = arglocs + index = op.getarg(0).getint() + self.load_from_gc_table(res_loc.value, index) + return fcond diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1,5 +1,4 @@ from rpython.rtyper.annlowlevel import cast_instance_to_gcref -from rpython.rlib import rgc from rpython.rlib.debug import debug_print, debug_start, debug_stop from rpython.jit.backend.llsupport.regalloc import FrameManager, \ RegisterManager, TempVar, compute_vars_longevity, BaseRegalloc, \ @@ -627,16 +626,11 @@ def prepare_op_finish(self, op, fcond): # the frame is in fp, but we have to point where in the frame is # the potential argument to FINISH - descr = op.getdescr() - fail_descr = cast_instance_to_gcref(descr) - # we know it does not move, but well - 
rgc._make_sure_does_not_move(fail_descr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) - locs = [loc, imm(fail_descr)] + locs = [loc] else: - locs = [imm(fail_descr)] + locs = [] return locs def load_condition_into_cc(self, box): @@ -892,6 +886,10 @@ prepare_op_same_as_r = _prepare_op_same_as prepare_op_same_as_f = _prepare_op_same_as + def prepare_op_load_from_gc_table(self, op, fcond): + resloc = self.force_allocate_reg(op) + return [resloc] + def prepare_op_call_malloc_nursery(self, op, fcond): size_box = op.getarg(0) assert isinstance(size_box, ConstInt) diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -146,7 +146,7 @@ MODEL_X86_64: ['floats', 'singlefloats'], MODEL_X86_64_SSE4: ['floats', 'singlefloats'], MODEL_ARM: ['floats', 'singlefloats', 'longlong'], - MODEL_PPC_64: [], # we don't even have PPC directory, so no + MODEL_PPC_64: ['floats'], MODEL_S390_64: ['floats'], }[backend_name] diff --git a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py --- a/rpython/jit/backend/llsupport/asmmemmgr.py +++ b/rpython/jit/backend/llsupport/asmmemmgr.py @@ -216,9 +216,6 @@ gcroot_markers = None - frame_positions = None - frame_assignments = None - def __init__(self, translated=None): if translated is None: translated = we_are_translated() @@ -335,12 +332,6 @@ assert gcrootmap is not None for pos, mark in self.gcroot_markers: gcrootmap.register_asm_addr(rawstart + pos, mark) - if cpu.HAS_CODEMAP: - cpu.codemap.register_frame_depth_map(rawstart, rawstart + size, - self.frame_positions, - self.frame_assignments) - self.frame_positions = None - self.frame_assignments = None return rawstart def _become_a_plain_block_builder(self): diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- 
a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -23,10 +23,11 @@ class GuardToken(object): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - guard_opnum, frame_depth): + guard_opnum, frame_depth, faildescrindex): assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr + self.faildescrindex = faildescrindex self.failargs = failargs self.fail_locs = fail_locs self.gcmap = self.compute_gcmap(gcmap, failargs, @@ -144,6 +145,22 @@ self.codemap_builder = CodemapBuilder() self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) + def setup_gcrefs_list(self, allgcrefs): + self._allgcrefs = allgcrefs + self._allgcrefs_faildescr_next = 0 + + def teardown_gcrefs_list(self): + self._allgcrefs = None + + def get_gcref_from_faildescr(self, descr): + """This assumes that it is called in order for all faildescrs.""" + search = cast_instance_to_gcref(descr) + while not _safe_eq( + self._allgcrefs[self._allgcrefs_faildescr_next], search): + self._allgcrefs_faildescr_next += 1 + assert self._allgcrefs_faildescr_next < len(self._allgcrefs) + return self._allgcrefs_faildescr_next + def set_debug(self, v): r = self._debug self._debug = v @@ -186,8 +203,7 @@ break exc = guardtok.must_save_exception() target = self.failure_recovery_code[exc + 2 * withfloats] - fail_descr = cast_instance_to_gcref(guardtok.faildescr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) + faildescrindex = guardtok.faildescrindex base_ofs = self.cpu.get_baseofs_of_frame_field() # # in practice, about 2/3rd of 'positions' lists that we build are @@ -229,7 +245,7 @@ self._previous_rd_locs = positions # write down the positions of locs guardtok.faildescr.rd_locs = positions - return fail_descr, target + return faildescrindex, target def enter_portal_frame(self, op): if self.cpu.HAS_CODEMAP: @@ -288,7 +304,7 @@ gcref = cast_instance_to_gcref(value) if gcref: - rgc._make_sure_does_not_move(gcref) + 
rgc._make_sure_does_not_move(gcref) # but should be prebuilt value = rffi.cast(lltype.Signed, gcref) je_location = self._call_assembler_check_descr(value, tmploc) # @@ -456,3 +472,8 @@ r_uint(rawstart + codeendpos))) debug_stop("jit-backend-addr") +def _safe_eq(x, y): + try: + return x == y + except AttributeError: # minor mess + return False diff --git a/rpython/jit/backend/llsupport/codemap.py b/rpython/jit/backend/llsupport/codemap.py --- a/rpython/jit/backend/llsupport/codemap.py +++ b/rpython/jit/backend/llsupport/codemap.py @@ -41,10 +41,6 @@ RPY_EXTERN long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, long *current_pos_addr); -RPY_EXTERN long pypy_jit_depthmap_add(unsigned long addr, unsigned int size, - unsigned int stackdepth); -RPY_EXTERN void pypy_jit_depthmap_clear(unsigned long addr, unsigned int size); - """], separate_module_sources=[ open(os.path.join(srcdir, 'skiplist.c'), 'r').read() + open(os.path.join(srcdir, 'codemap.c'), 'r').read() @@ -64,15 +60,6 @@ pypy_jit_codemap_firstkey = llexternal('pypy_jit_codemap_firstkey', [], lltype.Signed) -pypy_jit_depthmap_add = llexternal('pypy_jit_depthmap_add', - [lltype.Signed, lltype.Signed, - lltype.Signed], lltype.Signed) -pypy_jit_depthmap_clear = llexternal('pypy_jit_depthmap_clear', - [lltype.Signed, lltype.Signed], - lltype.Void) - -stack_depth_at_loc = llexternal('pypy_jit_stack_depth_at_loc', - [lltype.Signed], lltype.Signed) find_codemap_at_addr = llexternal('pypy_find_codemap_at_addr', [lltype.Signed, rffi.CArrayPtr(lltype.Signed)], llmemory.Address) @@ -102,20 +89,6 @@ items = pypy_jit_codemap_del(start, stop - start) if items: lltype.free(items, flavor='raw', track_allocation=False) - pypy_jit_depthmap_clear(start, stop - start) - - def register_frame_depth_map(self, rawstart, rawstop, frame_positions, - frame_assignments): - if not frame_positions: - return - assert len(frame_positions) == len(frame_assignments) - for i in range(len(frame_positions)-1, -1, -1): - pos = rawstart + 
frame_positions[i] - length = rawstop - pos - if length > 0: - #print "ADD:", pos, length, frame_assignments[i] - pypy_jit_depthmap_add(pos, length, frame_assignments[i]) - rawstop = pos def register_codemap(self, (start, size, l)): items = lltype.malloc(INT_LIST_PTR.TO, len(l), flavor='raw', diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -22,38 +22,6 @@ from rpython.memory.gctransform import asmgcroot from rpython.jit.codewriter.effectinfo import EffectInfo -class MovableObjectTracker(object): - - ptr_array_type = lltype.GcArray(llmemory.GCREF) - ptr_array_gcref = lltype.nullptr(llmemory.GCREF.TO) - - def __init__(self, cpu, const_pointers): - size = len(const_pointers) - # check that there are any moving object (i.e. chaning pointers). - # Otherwise there is no reason for an instance of this class. - assert size > 0 - # - # prepare GC array to hold the pointers that may change - self.ptr_array = lltype.malloc(MovableObjectTracker.ptr_array_type, size) - self.ptr_array_descr = cpu.arraydescrof(MovableObjectTracker.ptr_array_type) - self.ptr_array_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, self.ptr_array) - # use always the same ConstPtr to access the array - # (easer to read JIT trace) - self.const_ptr_gcref_array = ConstPtr(self.ptr_array_gcref) - # - # assign each pointer an index and put the pointer into the GC array. - # as pointers and addresses are not a good key to use before translation - # ConstPtrs are used as the key for the dict. 
- self._indexes = {} - for index in range(size): - ptr = const_pointers[index] - self._indexes[ptr] = index - self.ptr_array[index] = ptr.value - - def get_array_index(self, const_ptr): - index = self._indexes[const_ptr] - assert const_ptr.value == self.ptr_array[index] - return index # ____________________________________________________________ class GcLLDescription(GcCache): @@ -129,96 +97,9 @@ def gc_malloc_unicode(self, num_elem): return self._bh_malloc_array(num_elem, self.unicode_descr) - def _record_constptrs(self, op, gcrefs_output_list, - ops_with_movable_const_ptr, - changeable_const_pointers): - l = None - for i in range(op.numargs()): - v = op.getarg(i) - if isinstance(v, ConstPtr) and bool(v.value): - p = v.value - if rgc._make_sure_does_not_move(p): - gcrefs_output_list.append(p) - else: - if l is None: - l = [i] - else: - l.append(i) - if v not in changeable_const_pointers: - changeable_const_pointers.append(v) - # - if op.is_guard() or op.getopnum() == rop.FINISH: - llref = cast_instance_to_gcref(op.getdescr()) - assert rgc._make_sure_does_not_move(llref) - gcrefs_output_list.append(llref) - # - if l: - ops_with_movable_const_ptr[op] = l - - def _rewrite_changeable_constptrs(self, op, ops_with_movable_const_ptr, moving_obj_tracker): - newops = [] - for arg_i in ops_with_movable_const_ptr[op]: - v = op.getarg(arg_i) - # assert to make sure we got what we expected - assert isinstance(v, ConstPtr) - array_index = moving_obj_tracker.get_array_index(v) - - size, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) - array_index = array_index * size + offset - args = [moving_obj_tracker.const_ptr_gcref_array, - ConstInt(array_index), - ConstInt(size)] - load_op = ResOperation(rop.GC_LOAD_R, args) - newops.append(load_op) - op.setarg(arg_i, load_op) - # - newops.append(op) - return newops - def rewrite_assembler(self, cpu, operations, gcrefs_output_list): rewriter = GcRewriterAssembler(self, cpu) - newops = rewriter.rewrite(operations) - - # 
the key is an operation that contains a ConstPtr as an argument and - # this ConstPtrs pointer might change as it points to an object that - # can't be made non-moving (e.g. the object is pinned). - ops_with_movable_const_ptr = {} - # - # a list of such not really constant ConstPtrs. - changeable_const_pointers = [] - for op in newops: - # record all GCREFs, because the GC (or Boehm) cannot see them and - # keep them alive if they end up as constants in the assembler. - # If such a GCREF can change and we can't make the object it points - # to non-movable, we have to handle it seperatly. Such GCREF's are - # returned as ConstPtrs in 'changeable_const_pointers' and the - # affected operation is returned in 'op_with_movable_const_ptr'. - # For this special case see 'rewrite_changeable_constptrs'. - self._record_constptrs(op, gcrefs_output_list, - ops_with_movable_const_ptr, changeable_const_pointers) - # - # handle pointers that are not guaranteed to stay the same - if len(ops_with_movable_const_ptr) > 0: - moving_obj_tracker = MovableObjectTracker(cpu, changeable_const_pointers) - # - if not we_are_translated(): - # used for testing - self.last_moving_obj_tracker = moving_obj_tracker - # make sure the array containing the pointers is not collected by - # the GC (or Boehm) - gcrefs_output_list.append(moving_obj_tracker.ptr_array_gcref) - rgc._make_sure_does_not_move(moving_obj_tracker.ptr_array_gcref) - - ops = newops - newops = [] - for op in ops: - if op in ops_with_movable_const_ptr: - rewritten_ops = self._rewrite_changeable_constptrs(op, - ops_with_movable_const_ptr, moving_obj_tracker) - newops.extend(rewritten_ops) - else: - newops.append(op) - # + newops = rewriter.rewrite(operations, gcrefs_output_list) return newops @specialize.memo() @@ -244,6 +125,14 @@ """ return jitframe.JITFRAME.allocate(frame_info) + def make_gcref_tracer(self, array_base_addr, gcrefs): + # for tests, or for Boehm. 
Overridden for framework GCs + from rpython.jit.backend.llsupport import gcreftracer + return gcreftracer.make_boehm_tracer(array_base_addr, gcrefs) + + def clear_gcref_tracer(self, tracer): + pass # nothing needed unless overridden + class JitFrameDescrs: def _freeze_(self): return True @@ -752,6 +641,13 @@ p = rffi.cast(rffi.CCHARP, p) return (ord(p[0]) & IS_OBJECT_FLAG) != 0 + def make_gcref_tracer(self, array_base_addr, gcrefs): + from rpython.jit.backend.llsupport import gcreftracer + return gcreftracer.make_framework_tracer(array_base_addr, gcrefs) + + def clear_gcref_tracer(self, tracer): + tracer.array_length = 0 + # ____________________________________________________________ def get_ll_description(gcdescr, translator=None, rtyper=None): diff --git a/rpython/jit/backend/llsupport/gcreftracer.py b/rpython/jit/backend/llsupport/gcreftracer.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/gcreftracer.py @@ -0,0 +1,49 @@ +from rpython.rlib import rgc +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.jit.backend.llsupport.symbolic import WORD + + +GCREFTRACER = lltype.GcStruct( + 'GCREFTRACER', + ('array_base_addr', lltype.Signed), + ('array_length', lltype.Signed), + rtti=True) + +def gcrefs_trace(gc, obj_addr, callback, arg): + obj = llmemory.cast_adr_to_ptr(obj_addr, lltype.Ptr(GCREFTRACER)) + i = 0 + length = obj.array_length + addr = obj.array_base_addr + while i < length: + p = rffi.cast(llmemory.Address, addr + i * WORD) + gc._trace_callback(callback, arg, p) + i += 1 +lambda_gcrefs_trace = lambda: gcrefs_trace + +def make_framework_tracer(array_base_addr, gcrefs): + # careful about the order here: the allocation of the GCREFTRACER + # can trigger a GC. So we must write the gcrefs into the raw + # array only afterwards... 
+ rgc.register_custom_trace_hook(GCREFTRACER, lambda_gcrefs_trace) + length = len(gcrefs) + tr = lltype.malloc(GCREFTRACER) + # --no GC from here-- + tr.array_base_addr = array_base_addr + tr.array_length = length + i = 0 + while i < length: + p = rffi.cast(rffi.SIGNEDP, array_base_addr + i * WORD) + p[0] = rffi.cast(lltype.Signed, gcrefs[i]) + i += 1 + llop.gc_writebarrier(lltype.Void, tr) + # --no GC until here-- + return tr + +def make_boehm_tracer(array_base_addr, gcrefs): + # copy the addresses, but return 'gcrefs' as the object that must be + # kept alive + for i in range(len(gcrefs)): + p = rffi.cast(rffi.SIGNEDP, array_base_addr + i * WORD) + p[0] = rffi.cast(lltype.Signed, gcrefs[i]) + return gcrefs diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -246,6 +246,13 @@ def free_loop_and_bridges(self, compiled_loop_token): AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) + # turn off all gcreftracers + tracers = compiled_loop_token.asmmemmgr_gcreftracers + if tracers is not None: + compiled_loop_token.asmmemmgr_gcreftracers = None + for tracer in tracers: + self.gc_ll_descr.clear_gcref_tracer(tracer) + # then free all blocks of code and raw data blocks = compiled_loop_token.asmmemmgr_blocks if blocks is not None: compiled_loop_token.asmmemmgr_blocks = None diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -1,10 +1,12 @@ from rpython.rlib import rgc -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, r_dict from rpython.rlib.rarithmetic import ovfcheck, highest_bit from rpython.rtyper.lltypesystem import llmemory, lltype, rstr +from rpython.rtyper.annlowlevel import cast_instance_to_gcref from 
rpython.jit.metainterp import history from rpython.jit.metainterp.history import ConstInt, ConstPtr from rpython.jit.metainterp.resoperation import ResOperation, rop, OpHelpers +from rpython.jit.metainterp.typesystem import rd_eq, rd_hash from rpython.jit.codewriter import heaptracker from rpython.jit.backend.llsupport.symbolic import (WORD, get_array_token) @@ -94,21 +96,28 @@ op = self.get_box_replacement(op) orig_op = op replaced = False + opnum = op.getopnum() + keep = (opnum == rop.JIT_DEBUG) for i in range(op.numargs()): orig_arg = op.getarg(i) arg = self.get_box_replacement(orig_arg) + if isinstance(arg, ConstPtr) and bool(arg.value) and not keep: + arg = self.remove_constptr(arg) if orig_arg is not arg: if not replaced: - op = op.copy_and_change(op.getopnum()) + op = op.copy_and_change(opnum) orig_op.set_forwarded(op) replaced = True op.setarg(i, arg) - if rop.is_guard(op.opnum): + if rop.is_guard(opnum): if not replaced: - op = op.copy_and_change(op.getopnum()) + op = op.copy_and_change(opnum) orig_op.set_forwarded(op) op.setfailargs([self.get_box_replacement(a, True) for a in op.getfailargs()]) + if rop.is_guard(opnum) or opnum == rop.FINISH: + llref = cast_instance_to_gcref(op.getdescr()) + self.gcrefs_output_list.append(llref) self._newops.append(op) def replace_op_with(self, op, newop): @@ -304,13 +313,16 @@ return False - def rewrite(self, operations): + def rewrite(self, operations, gcrefs_output_list): # we can only remember one malloc since the next malloc can possibly # collect; but we can try to collapse several known-size mallocs into # one, both for performance and to reduce the number of write # barriers. We do this on each "basic block" of operations, which in # this case means between CALLs or unknown-size mallocs. 
# + self.gcrefs_output_list = gcrefs_output_list + self.gcrefs_map = None + self.gcrefs_recently_loaded = None operations = self.remove_bridge_exception(operations) self._changed_op = None for i in range(len(operations)): @@ -333,8 +345,7 @@ elif rop.can_malloc(op.opnum): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: - self.emitting_an_operation_that_can_collect() - self._known_lengths.clear() + self.emit_label() # ---------- write barriers ---------- if self.gc_ll_descr.write_barrier_descr is not None: if op.getopnum() == rop.SETFIELD_GC: @@ -940,3 +951,37 @@ operations[start+2].getopnum() == rop.RESTORE_EXCEPTION): return operations[:start] + operations[start+3:] return operations + + def emit_label(self): + self.emitting_an_operation_that_can_collect() + self._known_lengths.clear() + self.gcrefs_recently_loaded = None + + def _gcref_index(self, gcref): + if self.gcrefs_map is None: + self.gcrefs_map = r_dict(rd_eq, rd_hash) + try: + return self.gcrefs_map[gcref] + except KeyError: + pass + index = len(self.gcrefs_output_list) + self.gcrefs_map[gcref] = index + self.gcrefs_output_list.append(gcref) + return index + + def remove_constptr(self, c): + """Remove all ConstPtrs, and replace them with load_from_gc_table. + """ + # Note: currently, gcrefs_recently_loaded is only cleared in + # LABELs. We'd like something better, like "don't spill it", + # but that's the wrong level... 
+ index = self._gcref_index(c.value) + if self.gcrefs_recently_loaded is None: + self.gcrefs_recently_loaded = {} + try: + load_op = self.gcrefs_recently_loaded[index] + except KeyError: + load_op = ResOperation(rop.LOAD_FROM_GC_TABLE, [ConstInt(index)]) + self._newops.append(load_op) + self.gcrefs_recently_loaded[index] = load_op + return load_op diff --git a/rpython/jit/backend/llsupport/src/codemap.c b/rpython/jit/backend/llsupport/src/codemap.c --- a/rpython/jit/backend/llsupport/src/codemap.c +++ b/rpython/jit/backend/llsupport/src/codemap.c @@ -139,78 +139,3 @@ current_pos = data->bytecode_info[current_pos + 3]; } } - -/************************************************************/ -/*** depthmap storage ***/ -/************************************************************/ - -typedef struct { - unsigned int block_size; - unsigned int stack_depth; -} depthmap_data_t; - -static skipnode_t jit_depthmap_head; - -/*** interface used from codemap.py ***/ - -RPY_EXTERN -long pypy_jit_depthmap_add(unsigned long addr, unsigned int size, - unsigned int stackdepth) -{ - skipnode_t *new = skiplist_malloc(sizeof(depthmap_data_t)); - depthmap_data_t *data; - if (new == NULL) - return -1; /* too bad */ - - new->key = addr; - data = (depthmap_data_t *)new->data; - data->block_size = size; - data->stack_depth = stackdepth; - - pypy_codemap_invalid_set(1); - skiplist_insert(&jit_depthmap_head, new); - pypy_codemap_invalid_set(0); - return 0; -} - -RPY_EXTERN -void pypy_jit_depthmap_clear(unsigned long addr, unsigned int size) -{ - unsigned long search_key = addr + size - 1; - if (size == 0) - return; - - pypy_codemap_invalid_set(1); - while (1) { - /* search for all nodes belonging to the range, and remove them */ - skipnode_t *node = skiplist_search(&jit_depthmap_head, search_key); - if (node->key < addr) - break; /* exhausted */ - skiplist_remove(&jit_depthmap_head, node->key); - free(node); - } - pypy_codemap_invalid_set(0); -} - -/*** interface used from pypy/module/_vmprof 
***/ - -RPY_EXTERN -long pypy_jit_stack_depth_at_loc(long loc) -{ - skipnode_t *depthmap = skiplist_search(&jit_depthmap_head, - (unsigned long)loc); - depthmap_data_t *data; - unsigned long rel_addr; - - if (depthmap == &jit_depthmap_head) - return -1; - - rel_addr = (unsigned long)loc - depthmap->key; - data = (depthmap_data_t *)depthmap->data; - if (rel_addr >= data->block_size) - return -1; - - return data->stack_depth; -} - -/************************************************************/ diff --git a/rpython/jit/backend/llsupport/test/test_codemap.py b/rpython/jit/backend/llsupport/test/test_codemap.py --- a/rpython/jit/backend/llsupport/test/test_codemap.py +++ b/rpython/jit/backend/llsupport/test/test_codemap.py @@ -1,6 +1,5 @@ from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.jit.backend.llsupport.codemap import stack_depth_at_loc from rpython.jit.backend.llsupport.codemap import CodemapStorage, \ CodemapBuilder, unpack_traceback, find_codemap_at_addr @@ -27,34 +26,6 @@ # codemap.free() -def test_find_jit_frame_depth(): - codemap = CodemapStorage() - codemap.setup() - codemap.register_frame_depth_map(11, 26, [0, 5, 10], [1, 2, 3]) - codemap.register_frame_depth_map(30, 41, [0, 5, 10], [4, 5, 6]) - codemap.register_frame_depth_map(0, 11, [0, 5, 10], [7, 8, 9]) - assert stack_depth_at_loc(13) == 1 - assert stack_depth_at_loc(-3) == -1 - assert stack_depth_at_loc(40) == 6 - assert stack_depth_at_loc(41) == -1 - assert stack_depth_at_loc(5) == 8 - assert stack_depth_at_loc(17) == 2 - assert stack_depth_at_loc(38) == 5 - assert stack_depth_at_loc(25) == 3 - assert stack_depth_at_loc(26) == -1 - assert stack_depth_at_loc(11) == 1 - assert stack_depth_at_loc(10) == 9 - codemap.free_asm_block(11, 26) - assert stack_depth_at_loc(11) == -1 - assert stack_depth_at_loc(13) == -1 - assert stack_depth_at_loc(-3) == -1 - assert stack_depth_at_loc(40) == 6 - assert stack_depth_at_loc(41) == -1 - assert stack_depth_at_loc(5) == 8 - assert 
stack_depth_at_loc(38) == 5 - assert stack_depth_at_loc(10) == 9 - codemap.free() - def test_free_with_alignment(): codemap = CodemapStorage() codemap.setup() diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -196,31 +196,6 @@ assert is_valid_int(wbdescr.jit_wb_if_flag_byteofs) assert is_valid_int(wbdescr.jit_wb_if_flag_singlebyte) - def test_record_constptrs(self): - class MyFakeCPU(object): - def cast_adr_to_int(self, adr): - assert adr == "some fake address" - return 43 - class MyFakeGCRefList(object): - def get_address_of_gcref(self, s_gcref1): - assert s_gcref1 == s_gcref - return "some fake address" - S = lltype.GcStruct('S') - s = lltype.malloc(S) - s_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, s) - v_random_box = InputArgRef() - operations = [ - ResOperation(rop.PTR_EQ, [v_random_box, ConstPtr(s_gcref)]), - ] - gc_ll_descr = self.gc_ll_descr - gc_ll_descr.gcrefs = MyFakeGCRefList() - gcrefs = [] - operations = get_deep_immutable_oplist(operations) - operations2 = gc_ll_descr.rewrite_assembler(MyFakeCPU(), operations, - gcrefs) - assert operations2 == operations - assert gcrefs == [s_gcref] - class TestFrameworkMiniMark(TestFramework): gc = 'minimark' diff --git a/rpython/jit/backend/llsupport/test/test_gcreftracer.py b/rpython/jit/backend/llsupport/test/test_gcreftracer.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/test/test_gcreftracer.py @@ -0,0 +1,53 @@ +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.jit.backend.llsupport.gcreftracer import GCREFTRACER, gcrefs_trace +from rpython.jit.backend.llsupport.gcreftracer import make_framework_tracer +from rpython.jit.backend.llsupport.gcreftracer import make_boehm_tracer + + +class FakeGC: + def __init__(self): + self.called = [] + def _trace_callback(self, callback, arg, addr): + assert 
callback == "callback" + assert arg == "arg" + assert lltype.typeOf(addr) == llmemory.Address + self.called.append(addr) + + +def test_gcreftracer(): + a = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') + a[0] = 123 + a[1] = 456 + a[2] = 789 + tr = lltype.malloc(GCREFTRACER) + tr.array_base_addr = base = rffi.cast(lltype.Signed, a) + tr.array_length = 3 + gc = FakeGC() + gcrefs_trace(gc, llmemory.cast_ptr_to_adr(tr), "callback", "arg") + assert len(gc.called) == 3 + WORD = rffi.sizeof(lltype.Signed) + for i in range(3): + assert gc.called[i] == rffi.cast(llmemory.Address, base + i * WORD) + lltype.free(a, flavor='raw') + +def test_make_framework_tracer(): + a = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') + base = rffi.cast(lltype.Signed, a) + tr = make_framework_tracer(base, [123, 456, 789]) + assert a[0] == 123 + assert a[1] == 456 + assert a[2] == 789 + assert tr.array_base_addr == base + assert tr.array_length == 3 + lltype.free(a, flavor='raw') + +def test_make_boehm_tracer(): + a = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') + base = rffi.cast(lltype.Signed, a) + lst = [123, 456, 789] + tr = make_boehm_tracer(base, lst) + assert a[0] == 123 + assert a[1] == 456 + assert a[2] == 789 + assert tr is lst + lltype.free(a, flavor='raw') diff --git a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py deleted file mode 100644 --- a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py +++ /dev/null @@ -1,149 +0,0 @@ -from test_rewrite import get_size_descr, get_array_descr, get_description, BaseFakeCPU -from rpython.jit.backend.llsupport.descr import get_size_descr,\ - get_field_descr, get_array_descr, ArrayDescr, FieldDescr,\ - SizeDescr, get_interiorfield_descr -from rpython.jit.backend.llsupport.gc import GcLLDescr_boehm,\ - GcLLDescr_framework, MovableObjectTracker -from rpython.jit.backend.llsupport import jitframe, gc -from 
rpython.jit.metainterp.gc import get_description -from rpython.jit.tool.oparser import parse -from rpython.jit.metainterp.optimizeopt.util import equaloplists -from rpython.jit.metainterp.history import JitCellToken, FLOAT -from rpython.rtyper.lltypesystem import lltype, rffi, lltype, llmemory -from rpython.rtyper import rclass -from rpython.jit.backend.x86.arch import WORD -from rpython.rlib import rgc - -class Evaluator(object): - def __init__(self, scope): - self.scope = scope - def __getitem__(self, key): - return eval(key, self.scope) - - -class FakeLoopToken(object): - pass - -# The following class is based on rpython.jit.backend.llsupport.test.test_rewrite.RewriteTests. -# It's modified to be able to test the object pinning specific features. -class RewriteTests(object): - def check_rewrite(self, frm_operations, to_operations, **namespace): - # objects to use inside the test - A = lltype.GcArray(lltype.Signed) - adescr = get_array_descr(self.gc_ll_descr, A) - adescr.tid = 4321 - alendescr = adescr.lendescr - # - pinned_obj_type = lltype.GcStruct('PINNED_STRUCT', ('my_int', lltype.Signed)) - pinned_obj_my_int_descr = get_field_descr(self.gc_ll_descr, pinned_obj_type, 'my_int') - pinned_obj_ptr = lltype.malloc(pinned_obj_type) - pinned_obj_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, pinned_obj_ptr) - assert rgc.pin(pinned_obj_gcref) - # - notpinned_obj_type = lltype.GcStruct('NOT_PINNED_STRUCT', ('my_int', lltype.Signed)) - notpinned_obj_my_int_descr = get_field_descr(self.gc_ll_descr, notpinned_obj_type, 'my_int') - notpinned_obj_ptr = lltype.malloc(notpinned_obj_type) - notpinned_obj_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, notpinned_obj_ptr) - # - ptr_array_descr = self.cpu.arraydescrof(MovableObjectTracker.ptr_array_type) - # - vtable_descr = self.gc_ll_descr.fielddescr_vtable - O = lltype.GcStruct('O', ('parent', rclass.OBJECT), - ('x', lltype.Signed)) - o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) - # - tiddescr = 
self.gc_ll_descr.fielddescr_tid - wbdescr = self.gc_ll_descr.write_barrier_descr - WORD = globals()['WORD'] - # - strdescr = self.gc_ll_descr.str_descr - unicodedescr = self.gc_ll_descr.unicode_descr - strlendescr = strdescr.lendescr - unicodelendescr = unicodedescr.lendescr - - casmdescr = JitCellToken() - clt = FakeLoopToken() - clt._ll_initial_locs = [0, 8] - frame_info = lltype.malloc(jitframe.JITFRAMEINFO, flavor='raw') - clt.frame_info = frame_info - frame_info.jfi_frame_depth = 13 - frame_info.jfi_frame_size = 255 - framedescrs = self.gc_ll_descr.getframedescrs(self.cpu) - framelendescr = framedescrs.arraydescr.lendescr - jfi_frame_depth = framedescrs.jfi_frame_depth - jfi_frame_size = framedescrs.jfi_frame_size - jf_frame_info = framedescrs.jf_frame_info - signedframedescr = self.cpu.signedframedescr - floatframedescr = self.cpu.floatframedescr - casmdescr.compiled_loop_token = clt - tzdescr = None # noone cares - # - namespace.update(locals()) - # - for funcname in self.gc_ll_descr._generated_functions: - namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname) - namespace[funcname + '_descr'] = getattr(self.gc_ll_descr, - '%s_descr' % funcname) - # - ops = parse(frm_operations, namespace=namespace) - operations = self.gc_ll_descr.rewrite_assembler(self.cpu, - ops.operations, - []) - # make the array containing the GCREF's accessible inside the tests. - # This must be done after we call 'rewrite_assembler'. Before that - # call 'last_moving_obj_tracker' is None or filled with some old - # value. 
- namespace['ptr_array_gcref'] = self.gc_ll_descr.last_moving_obj_tracker.ptr_array_gcref - expected = parse(to_operations % Evaluator(namespace), - namespace=namespace) - equaloplists(operations, expected.operations) - lltype.free(frame_info, flavor='raw') - -class TestFramework(RewriteTests): - def setup_method(self, meth): - class config_(object): - class translation(object): - gc = 'minimark' - gcrootfinder = 'asmgcc' - gctransformer = 'framework' - gcremovetypeptr = False - gcdescr = get_description(config_) - self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, - really_not_translated=True) - self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( - lambda cpu: True) - # - class FakeCPU(BaseFakeCPU): - def sizeof(self, STRUCT, is_object): - descr = SizeDescr(104) - descr.tid = 9315 - descr.vtable = 12 - return descr - self.cpu = FakeCPU() - - def test_simple_getfield(self): - self.check_rewrite(""" - [] - i0 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) - """, """ - [] - p1 = gc_load_r(ConstPtr(ptr_array_gcref), %(0 * ptr_array_descr.itemsize + 1)s, %(ptr_array_descr.itemsize)s) - i0 = gc_load_i(p1, 0, -%(pinned_obj_my_int_descr.field_size)s) - """) - assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 1 - - def test_simple_getfield_twice(self): - self.check_rewrite(""" - [] - i0 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) - i1 = getfield_gc_i(ConstPtr(notpinned_obj_gcref), descr=notpinned_obj_my_int_descr) - i2 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) - """, """ - [] - p1 = gc_load_r(ConstPtr(ptr_array_gcref), %(0 * ptr_array_descr.itemsize + 1)s, %(ptr_array_descr.itemsize)s) - i0 = gc_load_i(p1, 0, -%(pinned_obj_my_int_descr.field_size)s) - i1 = gc_load_i(ConstPtr(notpinned_obj_gcref), 0, -%(notpinned_obj_my_int_descr.field_size)s) - p2 = gc_load_r(ConstPtr(ptr_array_gcref), %(1 * ptr_array_descr.itemsize + 1)s, 
%(ptr_array_descr.itemsize)s) - i2 = gc_load_i(p2, 0, -%(pinned_obj_my_int_descr.field_size)s) - """) - assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 2 diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -10,9 +10,8 @@ from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.metainterp.history import JitCellToken, FLOAT from rpython.jit.metainterp.history import AbstractFailDescr -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper import rclass -from rpython.jit.backend.x86.arch import WORD from rpython.jit.backend.llsupport.symbolic import (WORD, get_array_token) @@ -77,6 +76,9 @@ tdescr = get_size_descr(self.gc_ll_descr, T) tdescr.tid = 5678 tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + myT = lltype.cast_opaque_ptr(llmemory.GCREF, + lltype.malloc(T, zero=True)) + self.myT = myT # A = lltype.GcArray(lltype.Signed) adescr = get_array_descr(self.gc_ll_descr, A) @@ -112,6 +114,12 @@ xdescr = get_field_descr(self.gc_ll_descr, R1, 'x') ydescr = get_field_descr(self.gc_ll_descr, R1, 'y') zdescr = get_field_descr(self.gc_ll_descr, R1, 'z') + myR1 = lltype.cast_opaque_ptr(llmemory.GCREF, + lltype.malloc(R1, zero=True)) + myR1b = lltype.cast_opaque_ptr(llmemory.GCREF, + lltype.malloc(R1, zero=True)) + self.myR1 = myR1 + self.myR1b = myR1b # E = lltype.GcStruct('Empty') edescr = get_size_descr(self.gc_ll_descr, E) @@ -174,9 +182,10 @@ ops = parse(frm_operations, namespace=namespace) expected = parse(to_operations % Evaluator(namespace), namespace=namespace) + self.gcrefs = [] operations = self.gc_ll_descr.rewrite_assembler(self.cpu, ops.operations, - []) + self.gcrefs) remap = {} for a, b in zip(ops.inputargs, expected.inputargs): remap[b] = a @@ -1281,3 
+1290,124 @@ {t} jump() """.format(**locals())) + + def test_load_from_gc_table_1i(self): + self.check_rewrite(""" + [i1] + setfield_gc(ConstPtr(myR1), i1, descr=xdescr) + jump() + """, """ + [i1] + p0 = load_from_gc_table(0) + gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) + jump() + """) + assert self.gcrefs == [self.myR1] + + def test_load_from_gc_table_1p(self): + self.check_rewrite(""" + [p1] + setfield_gc(ConstPtr(myT), p1, descr=tzdescr) + jump() + """, """ + [i1] + p0 = load_from_gc_table(0) + cond_call_gc_wb(p0, descr=wbdescr) + gc_store(p0, %(tzdescr.offset)s, i1, %(tzdescr.field_size)s) + jump() + """) + assert self.gcrefs == [self.myT] + + def test_load_from_gc_table_2(self): + self.check_rewrite(""" + [i1, f2] + setfield_gc(ConstPtr(myR1), i1, descr=xdescr) + setfield_gc(ConstPtr(myR1), f2, descr=ydescr) + jump() + """, """ + [i1, f2] + p0 = load_from_gc_table(0) + gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) + gc_store(p0, %(ydescr.offset)s, f2, %(ydescr.field_size)s) + jump() + """) + assert self.gcrefs == [self.myR1] + + def test_load_from_gc_table_3(self): + self.check_rewrite(""" + [i1, f2] + setfield_gc(ConstPtr(myR1), i1, descr=xdescr) + label(f2) + setfield_gc(ConstPtr(myR1), f2, descr=ydescr) + jump() + """, """ + [i1, f2] + p0 = load_from_gc_table(0) + gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) + label(f2) + p1 = load_from_gc_table(0) + gc_store(p1, %(ydescr.offset)s, f2, %(ydescr.field_size)s) + jump() + """) + assert self.gcrefs == [self.myR1] + + def test_load_from_gc_table_4(self): + self.check_rewrite(""" + [i1, f2] + setfield_gc(ConstPtr(myR1), i1, descr=xdescr) + setfield_gc(ConstPtr(myR1b), f2, descr=ydescr) + jump() + """, """ + [i1, f2] + p0 = load_from_gc_table(0) + gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) + p1 = load_from_gc_table(1) + gc_store(p1, %(ydescr.offset)s, f2, %(ydescr.field_size)s) + jump() + """) + assert self.gcrefs == [self.myR1, self.myR1b] + + def 
test_pinned_simple_getfield(self): + # originally in test_pinned_object_rewrite; now should give the + # same result for pinned objects and for normal objects + self.check_rewrite(""" + [] + i0 = getfield_gc_i(ConstPtr(myR1), descr=xdescr) + """, """ + [] + p1 = load_from_gc_table(0) + i0 = gc_load_i(p1, %(xdescr.offset)s, -%(xdescr.field_size)s) + """) + assert self.gcrefs == [self.myR1] + + def test_pinned_simple_getfield_twice(self): + # originally in test_pinned_object_rewrite; now should give the + # same result for pinned objects and for normal objects + self.check_rewrite(""" + [] + i0 = getfield_gc_i(ConstPtr(myR1), descr=xdescr) + i1 = getfield_gc_i(ConstPtr(myR1b), descr=xdescr) + i2 = getfield_gc_i(ConstPtr(myR1), descr=xdescr) + """, """ + [] + p1 = load_from_gc_table(0) + i0 = gc_load_i(p1, %(xdescr.offset)s, -%(xdescr.field_size)s) + p2 = load_from_gc_table(1) + i1 = gc_load_i(p2, %(xdescr.offset)s, -%(xdescr.field_size)s) + i2 = gc_load_i(p1, %(xdescr.offset)s, -%(xdescr.field_size)s) + """) + assert self.gcrefs == [self.myR1, self.myR1b] + + def test_guard_in_gcref(self): + self.check_rewrite(""" + [i1, i2] + guard_true(i1) [] + guard_true(i2) [] + jump() + """, """ + [i1, i2] + guard_true(i1) [] + guard_true(i2) [] + jump() + """) + assert len(self.gcrefs) == 2 diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py @@ -6,6 +6,7 @@ from rpython.rlib import rthread from rpython.translator.translator import TranslationContext from rpython.jit.backend.detect_cpu import getcpuclass +from rpython.rlib.rweaklist import RWeakListMixin class CompiledVmprofTest(CCompiledMixin): CPUClass = getcpuclass() @@ -21,6 +22,7 @@ class MyCode: _vmprof_unique_id = 0 + _vmprof_weak_list = RWeakListMixin() ; _vmprof_weak_list.initialize() def __init__(self, name): self.name = name 
diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -285,7 +285,7 @@ class CompiledLoopToken(object): asmmemmgr_blocks = None - asmmemmgr_gcroots = 0 + asmmemmgr_gcreftracers = None def __init__(self, cpu, number): cpu.tracker.total_compiled_loops += 1 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -499,10 +499,13 @@ regalloc = RegAlloc(self, self.cpu.translate_support_code) # + allgcrefs = [] + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, allgcrefs) + self.reserve_gcref_table(allgcrefs) + functionpos = self.mc.get_relative_pos() self._call_header_with_stack_check() self._check_frame_depth_debug(self.mc) - operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) @@ -513,6 +516,7 @@ full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) + self.patch_gcref_table(looptoken, rawstart) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) looptoken._ll_loop_code = looppos + rawstart @@ -521,7 +525,13 @@ looptoken.number, loopname, r_uint(rawstart + looppos), r_uint(rawstart + size_excluding_failure_stuff), - r_uint(rawstart))) + r_uint(rawstart + functionpos))) + debug_print(" gc table: 0x%x" % r_uint(self.gc_table_addr)) + debug_print(" function: 0x%x" % r_uint(rawstart + functionpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + looppos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + + size_excluding_failure_stuff)) + debug_print(" end: 0x%x" % r_uint(rawstart + full_size)) debug_stop("jit-backend-addr") self.patch_pending_failure_recoveries(rawstart) # @@ -531,10 +541,11 @@ looptoken._x86_rawstart = rawstart 
looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset - looptoken._ll_function_addr = rawstart + looptoken._ll_function_addr = rawstart + functionpos if logger: log = logger.log_trace(MARK_TRACE_ASM, None, self.mc) log.write(inputargs, operations, None, ops_offset=ops_offset, unique_id=rawstart) + self.fixup_target_tokens(rawstart) self.teardown() # oprofile support @@ -563,11 +574,13 @@ 'b', descr_number) arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = RegAlloc(self, self.cpu.translate_support_code) - startpos = self.mc.get_relative_pos() + allgcrefs = [] operations = regalloc.prepare_bridge(inputargs, arglocs, operations, - self.current_clt.allgcrefs, + allgcrefs, self.current_clt.frame_info) + self.reserve_gcref_table(allgcrefs) + startpos = self.mc.get_relative_pos() self._check_frame_depth(self.mc, regalloc.get_gcmap()) bridgestartpos = self.mc.get_relative_pos() self._update_at_exit(arglocs, inputargs, faildescr, regalloc) @@ -577,14 +590,22 @@ fullsize = self.mc.get_relative_pos() # rawstart = self.materialize_loop(original_loop_token) + self.patch_gcref_table(original_loop_token, rawstart) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) - debug_bridge(descr_number, rawstart, codeendpos) + debug_start("jit-backend-addr") + debug_print("bridge out of Guard 0x%x has address 0x%x to 0x%x" % + (r_uint(descr_number), r_uint(rawstart + startpos), + r_uint(rawstart + codeendpos))) + debug_print(" gc table: 0x%x" % r_uint(self.gc_table_addr)) + debug_print(" jump target: 0x%x" % r_uint(rawstart + startpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + bridgestartpos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + codeendpos)) + debug_print(" end: 0x%x" % r_uint(rawstart + fullsize)) + debug_stop("jit-backend-addr") self.patch_pending_failure_recoveries(rawstart) # patch the jump from original guard - if logger: - logger.log_patch_guard(descr_number, rawstart) - 
self.patch_jump_for_descr(faildescr, rawstart) + self.patch_jump_for_descr(faildescr, rawstart + startpos) ops_offset = self.mc.ops_offset frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) @@ -669,6 +690,39 @@ mc.JMP_r(X86_64_SCRATCH_REG.value) mc.copy_to_raw_memory(adr_jump_offset) + def reserve_gcref_table(self, allgcrefs): + gcref_table_size = len(allgcrefs) * WORD + if IS_X86_64: + # align to a multiple of 16 and reserve space at the beginning + # of the machine code for the gc table. This lets us write + # machine code with relative addressing (%rip - constant). + gcref_table_size = (gcref_table_size + 15) & ~15 + mc = self.mc + assert mc.get_relative_pos() == 0 + for i in range(gcref_table_size): + mc.writechar('\x00') + elif IS_X86_32: + # allocate the gc table right now. This lets us write + # machine code with absolute 32-bit addressing. + self.gc_table_addr = self.datablockwrapper.malloc_aligned( + gcref_table_size, alignment=WORD) + # + self.setup_gcrefs_list(allgcrefs) + + def patch_gcref_table(self, looptoken, rawstart): + if IS_X86_64: + # the gc table is at the start of the machine code + self.gc_table_addr = rawstart + elif IS_X86_32: + # the gc table was already allocated by reserve_gcref_table() + rawstart = self.gc_table_addr + # + tracer = self.cpu.gc_ll_descr.make_gcref_tracer(rawstart, + self._allgcrefs) + gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) + gcreftracers.append(tracer) # keepalive + self.teardown_gcrefs_list() + def write_pending_failure_recoveries(self, regalloc): # for each pending guard, generate the code of the recovery stub # at the end of self.mc. 
@@ -792,6 +846,12 @@ clt.asmmemmgr_blocks = [] return clt.asmmemmgr_blocks + def get_asmmemmgr_gcreftracers(self, looptoken): + clt = looptoken.compiled_loop_token + if clt.asmmemmgr_gcreftracers is None: + clt.asmmemmgr_gcreftracers = [] + return clt.asmmemmgr_gcreftracers + def materialize_loop(self, looptoken): self.datablockwrapper.done() # finish using cpu.asmmemmgr self.datablockwrapper = None @@ -1370,6 +1430,29 @@ genop_cast_ptr_to_int = _genop_same_as genop_cast_int_to_ptr = _genop_same_as + def _patch_load_from_gc_table(self, index): + # must be called immediately after a "p"-mode instruction + # has been emitted. 64-bit mode only. + assert IS_X86_64 + address_in_buffer = index * WORD # at the start of the buffer + p_location = self.mc.get_relative_pos() + offset = address_in_buffer - p_location + self.mc.overwrite32(p_location-4, offset) + From pypy.commits at gmail.com Tue Apr 12 08:57:33 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 12 Apr 2016 05:57:33 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: use tmpdir instead of tempfile, added a version to the jitlog Message-ID: <570cf0bd.83561c0a.1f2da.ffff9d09@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83614:68ab27b22964 Date: 2016-04-12 14:56 +0200 http://bitbucket.org/pypy/pypy/changeset/68ab27b22964/ Log: use tmpdir instead of tempfile, added a version to the jitlog diff --git a/rpython/jit/backend/x86/test/test_jitlog.py b/rpython/jit/backend/x86/test/test_jitlog.py --- a/rpython/jit/backend/x86/test/test_jitlog.py +++ b/rpython/jit/backend/x86/test/test_jitlog.py @@ -4,11 +4,13 @@ from rpython.jit.tool.oparser import pure_parse from rpython.jit.metainterp import logger from rpython.jit.metainterp.typesystem import llhelper +from rpython.jit.metainterp.jitlog import JITLOG_VERSION_16BIT_LE from StringIO import StringIO from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.metainterp.history import AbstractDescr, JitCellToken, 
BasicFailDescr, BasicFinalDescr from rpython.jit.backend.model import AbstractCPU from rpython.rlib.jit import JitDriver +from rpython.rlib.objectmodel import always_inline from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.backend.x86.test.test_basic import Jit386Mixin from rpython.rlib.rvmprof import rvmprof @@ -16,42 +18,44 @@ class TestLogger(Jit386Mixin): - def test_explicit_enable(self): + def test_explicit_enable(self, tmpdir): vmprof = rvmprof.VMProf() - fileno, name = tempfile.mkstemp() - self.run_sample_loop(lambda: vmprof.enable_jitlog(fileno)) - assert os.path.exists(name) - with open(name, 'rb') as fd: + file = tmpdir.join('jitlog') + fileno = os.open(file.strpath, os.O_WRONLY | os.O_CREAT) + enable_jitlog = lambda: vmprof.enable_jitlog(fileno) + f = self.run_sample_loop(enable_jitlog) + self.meta_interp(f, [10, 0]) + + assert os.path.exists(file.strpath) + with file.open('rb') as f: # check the file header - assert fd.read(3) == '\x23\xfe\xaf' + assert f.read(3) == '\x23' + JITLOG_VERSION_16BIT_LE + assert len(f.read()) > 0 + + def test_env(self, monkeypatch, tmpdir): + file = tmpdir.join('jitlog') + monkeypatch.setenv("JITLOG", file.strpath) + f = self.run_sample_loop(None) + self.meta_interp(f, [10,0]) + assert os.path.exists(file.strpath) + with file.open('rb') as fd: + # check the file header + assert fd.read(3) == '\x23' + JITLOG_VERSION_16BIT_LE assert len(fd.read()) > 0 - print(name) - def test_env(self, monkeypatch): - fileno, name = tempfile.mkstemp() - monkeypatch.setenv("JITLOG", name) - self.run_sample_loop(None) - assert os.path.exists(name) - with open(name, 'rb') as fd: - # check the file header - assert fd.read(3) == '\x23\xfe\xaf' - assert len(fd.read()) > 0 - print(name) + def test_version(self): + pass - def run_sample_loop(self, func): - myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) - def f(x, y): + def run_sample_loop(self, func, myjitdriver = None): + if not myjitdriver: + myjitdriver = 
JitDriver(greens = [], reds = 'auto') + def f(y, x): res = 0 if func: func() while y > 0: - myjitdriver.can_enter_jit(x=x, y=y, res=res) - myjitdriver.jit_merge_point(x=x, y=y, res=res) + myjitdriver.jit_merge_point() res += x - if res > 40: - res += 1 - res -= 2 - res += 1 y -= 1 return res - res = self.meta_interp(f, [6, 20]) + return f diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -7,6 +7,10 @@ from rpython.rlib.objectmodel import compute_unique_id, always_inline import sys import weakref +import struct + +JITLOG_VERSION = 1 +JITLOG_VERSION_16BIT_LE = struct.pack(" Author: Richard Plangger Branch: new-jit-log Changeset: r83615:25e8a7bf37c2 Date: 2016-04-12 15:03 +0200 http://bitbucket.org/pypy/pypy/changeset/25e8a7bf37c2/ Log: added test to ensure version number is correctly written diff --git a/rpython/jit/backend/x86/test/test_jitlog.py b/rpython/jit/backend/x86/test/test_jitlog.py --- a/rpython/jit/backend/x86/test/test_jitlog.py +++ b/rpython/jit/backend/x86/test/test_jitlog.py @@ -5,6 +5,7 @@ from rpython.jit.metainterp import logger from rpython.jit.metainterp.typesystem import llhelper from rpython.jit.metainterp.jitlog import JITLOG_VERSION_16BIT_LE +from rpython.jit.metainterp import jitlog from StringIO import StringIO from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.metainterp.history import AbstractDescr, JitCellToken, BasicFailDescr, BasicFinalDescr @@ -43,8 +44,29 @@ assert fd.read(3) == '\x23' + JITLOG_VERSION_16BIT_LE assert len(fd.read()) > 0 - def test_version(self): - pass + def test_version(self, monkeypatch, tmpdir): + file = tmpdir.join('jitlog') + monkeypatch.setattr(jitlog, 'JITLOG_VERSION_16BIT_LE', '\xff\xfe') + monkeypatch.setenv("JITLOG", file.strpath) + f = self.run_sample_loop(None) + self.meta_interp(f, [10,0]) + assert os.path.exists(file.strpath) + with file.open('rb') as fd: + # 
check the file header + assert fd.read(3) == '\x23\xff\xfe' + assert len(fd.read()) > 0 + + def test_version(self, monkeypatch, tmpdir): + file = tmpdir.join('jitlog') + monkeypatch.setattr(jitlog, 'JITLOG_VERSION_16BIT_LE', '\xff\xfe') + monkeypatch.setenv("JITLOG", file.strpath) + f = self.run_sample_loop(None) + self.meta_interp(f, [10,0]) + assert os.path.exists(file.strpath) + with file.open('rb') as fd: + # check the file header + assert fd.read(3) == '\x23\xff\xfe' + assert len(fd.read()) > 0 def run_sample_loop(self, func, myjitdriver = None): if not myjitdriver: From pypy.commits at gmail.com Tue Apr 12 09:49:28 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 12 Apr 2016 06:49:28 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: (work in progress) adding a new API to the jit driver to return a rather "generic" list of primitive types that describe the execution state of any meta interpreter Message-ID: <570cfce8.d5da1c0a.d3c4.ffff8750@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83616:a7f5611a8203 Date: 2016-04-12 15:48 +0200 http://bitbucket.org/pypy/pypy/changeset/a7f5611a8203/ Log: (work in progress) adding a new API to the jit driver to return a rather "generic" list of primitive types that describe the execution state of any meta interpreter diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -41,6 +41,11 @@ from rpython.rlib import rvmprof return rvmprof.get_unique_id(bytecode) +def get_location(next_instr, is_being_profiled, bytecode): + from pypy.tool.stdlib_opcode import opcode_method_names + name = opcode_method_names[ord(bytecode.co_code[next_instr])] + return (bytecode.co_filename, + )'%s #%d %s' % (bytecode.get_repr(), next_instr, name) def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 diff --git 
a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -197,8 +197,8 @@ def encode_debug_info(self, op): log = self.logger jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] - info = jd_sd.warmstate.get_location_str(op.getarg(2)) - log._write_marked(MARK_JITLOG_DEBUG_MERGE_POINT, encode_str(info)) + filename, = jd_sd.warmstate.get_location(op.getarglist()[3:]) + log._write_marked(MARK_JITLOG_DEBUG_MERGE_POINT, encode_str(filename)) def encode_op(self, op): diff --git a/rpython/jit/metainterp/test/test_jitlog.py b/rpython/jit/metainterp/test/test_jitlog.py --- a/rpython/jit/metainterp/test/test_jitlog.py +++ b/rpython/jit/metainterp/test/test_jitlog.py @@ -12,9 +12,11 @@ class FakeJitDriver(object): class warmstate(object): @staticmethod - def get_location_str(ptr): - if ptr.value == 0: - return 'string #3 BYTE_CODE' + def get_location_str(): + return 'string #3 BYTE_CODE' + def get_location(greenkey_list): + assert len(greenkey_list) == 0 + return '/home/pypy/jit.py', 0 class FakeMetaInterpSd: cpu = AbstractCPU() @@ -40,5 +42,20 @@ assert binary.startswith(b'\x00\x04\x00\x00\x00loop') assert binary.endswith(b'\x24\x06\x00\x00\x00string\x00\x00\x00\x00\x00\x00\x00\x00') + def test_debug_merge_point(self, tmpdir): + logger = jitlog.VMProfJitLogger() + file = tmpdir.join('binary_file') + file.ensure() + fd = file.open('wb') + logger.cintf.jitlog_init(fd.fileno()) + log_trace = logger.log_trace(0, self.make_metainterp_sd(), None) + op = ResOperation(rop.DEBUG_MERGE_POINT, [ConstInt(0), ConstInt(0), ConstInt(0)]) + log_trace.write([], [op]) + #the next line will close 'fd' + fd.close() + logger.finish() + binary = file.read() + assert binary.startswith(b'\x00\x04\x00\x00\x00loop') + assert binary.endswith(b'\x24\x06\x00\x00\x00string\x00\x00\x00\x00\x00\x00\x00\x00') diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- 
a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -678,6 +678,23 @@ drivername = jitdriver.name else: drivername = '' + # get_location new API + get_location_ptr = self.jitdriver_sd._get_location_ptr + if get_location_ptr is None: + missing = '(%s: no get_location)' % drivername + def get_location_str(greenkey): + return missing + else: + unwrap_greenkey = self.make_unwrap_greenkey() + def get_location_str(greenkey): + greenargs = unwrap_greenkey(greenkey) + fn = support.maybe_on_top_of_llinterp(rtyper, get_location_ptr) + llres = fn(*greenargs) + if not we_are_translated() and isinstance(llres, tuple): + return llres + return llres # TODO hltuple? + self.get_location = get_location + # get_location_ptr = self.jitdriver_sd._get_printable_location_ptr if get_location_ptr is None: missing = '(%s: no get_printable_location)' % drivername diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -604,7 +604,7 @@ get_printable_location=None, confirm_enter_jit=None, can_never_inline=None, should_unroll_one_iteration=None, name='jitdriver', check_untranslated=True, vectorize=False, - get_unique_id=None, is_recursive=False): + get_unique_id=None, is_recursive=False, get_location=None): if greens is not None: self.greens = greens self.name = name @@ -638,6 +638,7 @@ assert get_jitcell_at is None, "get_jitcell_at no longer used" assert set_jitcell_at is None, "set_jitcell_at no longer used" self.get_printable_location = get_printable_location + self.get_location = get_location if get_unique_id is None: get_unique_id = lambda *args: 0 self.get_unique_id = get_unique_id From pypy.commits at gmail.com Tue Apr 12 10:05:02 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Apr 2016 07:05:02 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: more Message-ID: <570d008e.858e1c0a.1ea7b.6184@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5636:d80eeb4f9ad1 Date: 2016-04-12 
16:05 +0200 http://bitbucket.org/pypy/extradoc/changeset/d80eeb4f9ad1/ Log: more diff --git a/planning/misc.txt b/planning/misc.txt --- a/planning/misc.txt +++ b/planning/misc.txt @@ -3,6 +3,8 @@ minor/major collections: order the visit to the objects by... address? +using two lists of pointers, popping from one and appending to the +other, and when the first one is empty, sort the other and swap them make the resizing of dict/lists more GC-aware @@ -56,3 +58,12 @@ avoid MOVSD/MOVSS between registers; do a full copy with MOVAPD or MOVDQA + +on CPUs with 'emsr' in /proc/cpuinfo, a memcpy is documented as best +implemented as simply REP MOVSB. But measures on my CPU show it's not +the case. E.g. sizes <= 128 are three times faster if done by a call +to memcpy than by REP MOVSB, for any size. + +still, look at replacing CALLs to memcpy by, say, up to 3 pairs of +MOVs of 1, 2, 4, 8 or 16 bytes each. it is a win also because it +doesn't force specific registers From pypy.commits at gmail.com Tue Apr 12 10:24:01 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 12 Apr 2016 07:24:01 -0700 (PDT) Subject: [pypy-commit] pypy default: (s390x) execute patch_stack_checks before materialize loop, translation issue fixed Message-ID: <570d0501.c653c20a.3963e.ffffaf8d@mx.google.com> Author: Richard Plangger Branch: Changeset: r83617:4fd5a8562c39 Date: 2016-04-12 16:23 +0200 http://bitbucket.org/pypy/pypy/changeset/4fd5a8562c39/ Log: (s390x) execute patch_stack_checks before materialize loop, translation issue fixed diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -644,6 +644,7 @@ full_size = self.mc.get_relative_pos() # self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) + # if not we_are_translated(): self.mc.trap() # should be never reached rawstart = self.materialize_loop(looptoken) @@ -706,8 +707,8 @@ operations 
= regalloc.prepare_bridge(inputargs, arglocs, operations, allgcrefs, self.current_clt.frame_info) - startpos = len(allgcrefs) * WORD self.pool.pre_assemble(self, operations, allgcrefs, bridge=True) + startpos = self.mc.get_relative_pos() self._check_frame_depth(self.mc, regalloc.get_gcmap()) bridgestartpos = self.mc.get_relative_pos() self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - bridgestartpos)) @@ -717,9 +718,10 @@ self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() # + self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) + # rawstart = self.materialize_loop(original_loop_token) self.patch_gcref_table(original_loop_token, rawstart) - self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) debug_start("jit-backend-addr") debug_print("bridge out of Guard 0x%x has address 0x%x to 0x%x" % (r_uint(descr_number), r_uint(rawstart + startpos), @@ -742,7 +744,7 @@ self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) self.teardown() - return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos, + return AsmInfo(ops_offset, rawstart + startpos, codeendpos - startpos, rawstart + bridgestartpos) def patch_gcref_table(self, looptoken, rawstart): diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -11,6 +11,9 @@ from rpython.jit.backend.zarch.arch import (WORD, RECOVERY_GCMAP_POOL_OFFSET, RECOVERY_TARGET_POOL_OFFSET) from rpython.rlib.longlong2float import float2longlong +from rpython.jit.metainterp.history import (ConstFloat, + ConstInt, ConstPtr) + class PoolOverflow(Exception): pass @@ -58,14 +61,14 @@ return self.offset_map[uvalue] def unique_value(self, val): - if val.type == FLOAT: + if isinstance(val, ConstFloat): if val.getfloat() == 0.0: return 0 return float2longlong(val.getfloat()) - elif val.type == INT: + elif isinstance(val, ConstInt): return 
rffi.cast(lltype.Signed, val.getint()) else: - assert val.type == REF + assert isinstance(val, ConstPtr) return rffi.cast(lltype.Signed, val.getref_base()) def reserve_literal(self, size, box, asm): diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py --- a/rpython/jit/backend/zarch/test/test_runner.py +++ b/rpython/jit/backend/zarch/test/test_runner.py @@ -25,5 +25,5 @@ return cpu add_loop_instructions = "lg; lgr; larl; agr; cgfi; jge; j;$" - bridge_loop_instructions = "larl; lg; cgfi; jnl; lghi; " \ - "(lgfi|iilf);( iihf;)? (lgfi|iilf);( iihf;)? basr; (lgfi|iilf);( iihf;)? br;$" + bridge_loop_instructions = "lg; cgfi; jnl; lghi; " \ + "(lgfi|iilf);( iihf;)? (lgfi|iilf);( iihf;)? basr; larl; (lgfi|iilf);( iihf;)? br;$" From pypy.commits at gmail.com Tue Apr 12 10:45:24 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 12 Apr 2016 07:45:24 -0700 (PDT) Subject: [pypy-commit] pypy default: (s390x) jumping to the wrong address Message-ID: <570d0a04.839a1c0a.67c51.ffffd514@mx.google.com> Author: Richard Plangger Branch: Changeset: r83618:c893449515d6 Date: 2016-04-12 16:44 +0200 http://bitbucket.org/pypy/pypy/changeset/c893449515d6/ Log: (s390x) jumping to the wrong address diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -648,8 +648,8 @@ if not we_are_translated(): self.mc.trap() # should be never reached rawstart = self.materialize_loop(looptoken) + self.patch_gcref_table(looptoken, rawstart) looptoken._ll_function_addr = rawstart + functionpos - self.patch_gcref_table(looptoken, rawstart) # looptoken._ll_loop_code = looppos + rawstart debug_start("jit-backend-addr") @@ -707,6 +707,7 @@ operations = regalloc.prepare_bridge(inputargs, arglocs, operations, allgcrefs, self.current_clt.frame_info) + # reserve gcref table is handled in pre_assemble 
self.pool.pre_assemble(self, operations, allgcrefs, bridge=True) startpos = self.mc.get_relative_pos() self._check_frame_depth(self.mc, regalloc.get_gcmap()) @@ -734,7 +735,7 @@ debug_stop("jit-backend-addr") self.patch_pending_failure_recoveries(rawstart) # patch the jump from original guard - self.patch_jump_for_descr(faildescr, rawstart + bridgestartpos) + self.patch_jump_for_descr(faildescr, rawstart + startpos) ops_offset = self.mc.ops_offset frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) From pypy.commits at gmail.com Tue Apr 12 11:23:03 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 12 Apr 2016 08:23:03 -0700 (PDT) Subject: [pypy-commit] pypy default: (s390x) store_force_index didnt use the new descr index Message-ID: <570d12d7.6614c20a.ebfb5.ffff8e85@mx.google.com> Author: Richard Plangger Branch: Changeset: r83619:ca0843440e9f Date: 2016-04-12 17:20 +0200 http://bitbucket.org/pypy/pypy/changeset/ca0843440e9f/ Log: (s390x) store_force_index didnt use the new descr index diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -358,8 +358,9 @@ guard_op.getopnum() == rop.GUARD_NOT_FORCED_2) faildescr = guard_op.getdescr() ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - self.mc.load_imm(r.SCRATCH, rffi.cast(lltype.Signed, - cast_instance_to_gcref(faildescr))) + # + faildescrindex = self.get_gcref_from_faildescr(faildescr) + self.load_gcref_into(r.SCRATCH, faildescrindex) self.mc.STG(r.SCRATCH, l.addr(ofs, r.SPP)) def _find_nearby_operation(self, regalloc, delta): From pypy.commits at gmail.com Tue Apr 12 11:46:41 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 08:46:41 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-includes: merge default into branch Message-ID: <570d1861.c3941c0a.2733a.ffffe2b1@mx.google.com> Author: 
mattip Branch: cleanup-includes Changeset: r83620:c91506c55909 Date: 2016-04-12 18:22 +0300 http://bitbucket.org/pypy/pypy/changeset/c91506c55909/ Log: merge default into branch diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1002,12 +1002,6 @@ functions = [] decls = {} pypy_decls = decls['pypy_decl.h'] = [] - pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#ifndef PYPY_STANDALONE\n") - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("extern \"C\" {") - pypy_decls.append("#endif\n") pypy_decls.append('#define Signed long /* xxx temporary fix */\n') pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') @@ -1016,15 +1010,6 @@ for header_name, header_functions in FUNCTIONS_BY_HEADER.iteritems(): if header_name not in decls: - header = decls[header_name] = [] - header.append("#ifndef _PYPY_%s\n" % - header_name.upper().replace('.','_')) - header.append("#define _PYPY_%s\n" % - header_name.upper().replace('.','_')) - header.append("#ifndef PYPY_STANDALONE\n") - header.append("#ifdef __cplusplus") - header.append("extern \"C\" {") - header.append("#endif\n") header.append('#define Signed long /* xxx temporary fix */\n') header.append('#define Unsigned unsigned long /* xxx temporary fix */\n') else: @@ -1067,12 +1052,6 @@ header = decls[header_name] header.append('#undef Signed /* xxx temporary fix */\n') header.append('#undef Unsigned /* xxx temporary fix */\n') - header.append("#ifdef __cplusplus") - header.append("}") - header.append("#endif") - header.append("#endif /*PYPY_STANDALONE*/\n") - header.append("#endif /*_PYPY_%s_H*/\n" % - header_name.upper().replace('.','_')) for header_name, header_decls in decls.iteritems(): decl_h = udir.join(header_name) diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- 
a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -132,7 +132,18 @@ /* Missing definitions */ #include "missing.h" -#include +/* The declarations of most API functions are generated in a separate file */ +/* Don't include them while building PyPy, RPython also generated signatures + * which are similar but not identical. */ +#ifndef PYPY_STANDALONE +#ifdef __cplusplus +extern "C" { +#endif + #include +#ifdef __cplusplus +} +#endif +#endif /* PYPY_STANDALONE */ /* Define macros for inline documentation. */ #define PyDoc_VAR(name) static char name[] diff --git a/pypy/module/cpyext/include/structmember.h b/pypy/module/cpyext/include/structmember.h --- a/pypy/module/cpyext/include/structmember.h +++ b/pypy/module/cpyext/include/structmember.h @@ -78,7 +78,11 @@ /* API functions. */ +/* Don't include them while building PyPy, RPython also generated signatures + * which are similar but not identical. */ +#ifndef PYPY_STANDALONE #include "pypy_structmember_decl.h" +#endif #ifdef __cplusplus diff --git a/pypy/module/cpyext/structmember.py b/pypy/module/cpyext/structmember.py --- a/pypy/module/cpyext/structmember.py +++ b/pypy/module/cpyext/structmember.py @@ -2,7 +2,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.structmemberdefs import * -from pypy.module.cpyext.api import ADDR, PyObjectP, cpython_api +from pypy.module.cpyext.api import ADDR, PyObjectP, cpython_api, CONST_STRING from pypy.module.cpyext.intobject import PyInt_AsLong, PyInt_AsUnsignedLong from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, from_ref, make_ref @@ -34,7 +34,7 @@ _HEADER = 'pypy_structmember_decl.h' - at cpython_api([PyObject, lltype.Ptr(PyMemberDef)], PyObject, header=_HEADER) + at cpython_api([CONST_STRING, lltype.Ptr(PyMemberDef)], PyObject, header=_HEADER) def PyMember_GetOne(space, obj, w_member): 
addr = rffi.cast(ADDR, obj) addr += w_member.c_offset @@ -85,7 +85,7 @@ return w_result - at cpython_api([PyObject, lltype.Ptr(PyMemberDef), PyObject], rffi.INT_real, + at cpython_api([rffi.CCHARP, lltype.Ptr(PyMemberDef), PyObject], rffi.INT_real, error=-1, header=_HEADER) def PyMember_SetOne(space, obj, w_member, w_value): addr = rffi.cast(ADDR, obj) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -271,17 +271,32 @@ def member_getter(self, space, w_self): assert isinstance(self, W_MemberDescr) check_descr(space, w_self, self.w_type) - return PyMember_GetOne(space, w_self, self.member) + pyref = make_ref(space, w_self) + try: + return PyMember_GetOne( + space, rffi.cast(rffi.CCHARP, pyref), self.member) + finally: + Py_DecRef(space, pyref) def member_delete(self, space, w_self): assert isinstance(self, W_MemberDescr) check_descr(space, w_self, self.w_type) - PyMember_SetOne(space, w_self, self.member, None) + pyref = make_ref(space, w_self) + try: + PyMember_SetOne( + space, rffi.cast(rffi.CCHARP, pyref), self.member, None) + finally: + Py_DecRef(space, pyref) def member_setter(self, space, w_self, w_value): assert isinstance(self, W_MemberDescr) check_descr(space, w_self, self.w_type) - PyMember_SetOne(space, w_self, self.member, w_value) + pyref = make_ref(space, w_self) + try: + PyMember_SetOne( + space, rffi.cast(rffi.CCHARP, pyref), self.member, w_value) + finally: + Py_DecRef(space, pyref) class W_PyCTypeObject(W_TypeObject): @jit.dont_look_inside diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -12,7 +12,6 @@ from rpython.jit.metainterp.history import AbstractFailDescr from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper import rclass 
-from rpython.jit.backend.x86.arch import WORD from rpython.jit.backend.llsupport.symbolic import (WORD, get_array_token) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1,5 +1,5 @@ from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, - debug_bridge, DEBUG_COUNTER) + DEBUG_COUNTER) from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from rpython.jit.backend.llsupport import jitframe, rewrite from rpython.jit.backend.model import CompiledLoopToken @@ -78,7 +78,6 @@ self.mc = None self.pool = None - def target_arglocs(self, looptoken): return looptoken._zarch_arglocs @@ -131,28 +130,22 @@ def generate_quick_failure(self, guardtok): startpos = self.mc.currpos() - fail_descr, target = self.store_info_on_descr(startpos, guardtok) + faildescrindex, target = self.store_info_on_descr(startpos, guardtok) assert target != 0 - # POOL - #pool_offset = guardtok._pool_offset - #assert pool_offset != -1 - # overwrite the gcmap in the jitframe - #offset = pool_offset + RECOVERY_GCMAP_POOL_OFFSET - #self.mc.LG(r.SCRATCH2, l.pool(offset)) - ## overwrite the target in pool - #offset = pool_offset + RECOVERY_TARGET_POOL_OFFSET - ## overwrite!! 
- #self.pool.overwrite_64(self.mc, offset, target) - #self.mc.LG(r.r14, l.pool(offset)) + self.load_gcref_into(r.SCRATCH, faildescrindex) self.load_gcmap(self.mc, r.SCRATCH2, gcmap=guardtok.gcmap) self.mc.load_imm(r.r14, target) - self.mc.load_imm(r.SCRATCH, fail_descr) self.mc.BCR(c.ANY, r.r14) return startpos + def load_gcref_into(self, register, index): + topoff = index * WORD + size = self.pool.gcref_table_size + self.mc.LG(register, l.addr(-size + topoff, r.POOL)) + def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): descr = self.cpu.gc_ll_descr.write_barrier_descr if descr is None: @@ -625,7 +618,6 @@ frame_info = self.datablockwrapper.malloc_aligned( jitframe.JITFRAMEINFO_SIZE, alignment=WORD) clt.frame_info = rffi.cast(jitframe.JITFRAMEINFOPTR, frame_info) - clt.allgcrefs = [] clt.frame_info.clear() # for now if log: @@ -634,10 +626,12 @@ regalloc = Regalloc(assembler=self) # + allgcrefs = [] operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) - self.pool.pre_assemble(self, operations) - entrypos = self.mc.get_relative_pos() + looptoken, allgcrefs) + # reserve_gcref_table is handled in pool + self.pool.pre_assemble(self, operations, allgcrefs) + functionpos = self.mc.get_relative_pos() self._call_header_with_stack_check() looppos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, @@ -645,14 +639,17 @@ self.update_frame_depth(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) # size_excluding_failure_stuff = self.mc.get_relative_pos() - #self.pool.post_assemble(self) + # self.write_pending_failure_recoveries() full_size = self.mc.get_relative_pos() # self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) + # if not we_are_translated(): self.mc.trap() # should be never reached rawstart = self.materialize_loop(looptoken) + self.patch_gcref_table(looptoken, rawstart) + looptoken._ll_function_addr = rawstart + functionpos # looptoken._ll_loop_code 
= looppos + rawstart debug_start("jit-backend-addr") @@ -660,8 +657,15 @@ looptoken.number, loopname, r_uint(rawstart + looppos), r_uint(rawstart + size_excluding_failure_stuff), - r_uint(rawstart))) + r_uint(rawstart + functionpos))) + debug_print(" gc table: 0x%x" % r_uint(self.gc_table_addr)) + debug_print(" function: 0x%x" % r_uint(rawstart + functionpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + looppos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + + size_excluding_failure_stuff)) + debug_print(" end: 0x%x" % r_uint(rawstart + full_size)) debug_stop("jit-backend-addr") + # self.patch_pending_failure_recoveries(rawstart) # ops_offset = self.mc.ops_offset @@ -670,7 +674,6 @@ looptoken._zarch_rawstart = rawstart looptoken._zarch_fullsize = full_size looptoken._zarch_ops_offset = ops_offset - looptoken._ll_function_addr = rawstart + entrypos if logger: logger.log_loop(inputargs, operations, 0, "rewritten", name=loopname, ops_offset=ops_offset) @@ -683,7 +686,7 @@ # self.cpu.profile_agent.native_code_written(name, # rawstart, full_size) return AsmInfo(ops_offset, rawstart + looppos, - size_excluding_failure_stuff - looppos) + size_excluding_failure_stuff - looppos, rawstart) @rgc.no_release_gil def assemble_bridge(self, faildescr, inputargs, operations, @@ -700,14 +703,16 @@ arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = Regalloc(assembler=self) + allgcrefs = [] operations = regalloc.prepare_bridge(inputargs, arglocs, - operations, - self.current_clt.allgcrefs, + operations, allgcrefs, self.current_clt.frame_info) - self.pool.pre_assemble(self, operations, bridge=True) + # reserve gcref table is handled in pre_assemble + self.pool.pre_assemble(self, operations, allgcrefs, bridge=True) startpos = self.mc.get_relative_pos() - self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - startpos)) self._check_frame_depth(self.mc, regalloc.get_gcmap()) + bridgestartpos = self.mc.get_relative_pos() + self.mc.LARL(r.POOL, 
l.halfword(self.pool.pool_start - bridgestartpos)) frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() #self.pool.post_assemble(self) @@ -715,8 +720,19 @@ fullsize = self.mc.get_relative_pos() # self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) + # rawstart = self.materialize_loop(original_loop_token) - debug_bridge(descr_number, rawstart, codeendpos) + self.patch_gcref_table(original_loop_token, rawstart) + debug_start("jit-backend-addr") + debug_print("bridge out of Guard 0x%x has address 0x%x to 0x%x" % + (r_uint(descr_number), r_uint(rawstart + startpos), + r_uint(rawstart + codeendpos))) + debug_print(" gc table: 0x%x" % r_uint(self.gc_table_addr)) + debug_print(" jump target: 0x%x" % r_uint(rawstart + startpos)) + debug_print(" resops: 0x%x" % r_uint(rawstart + bridgestartpos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + codeendpos)) + debug_print(" end: 0x%x" % r_uint(rawstart + fullsize)) + debug_stop("jit-backend-addr") self.patch_pending_failure_recoveries(rawstart) # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart + startpos) @@ -729,7 +745,22 @@ self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) self.teardown() - return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) + return AsmInfo(ops_offset, rawstart + startpos, codeendpos - startpos, + rawstart + bridgestartpos) + + def patch_gcref_table(self, looptoken, rawstart): + self.gc_table_addr = rawstart + tracer = self.cpu.gc_ll_descr.make_gcref_tracer(rawstart, + self._allgcrefs) + gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) + gcreftracers.append(tracer) # keepalive + self.teardown_gcrefs_list() + + def get_asmmemmgr_gcreftracers(self, looptoken): + clt = looptoken.compiled_loop_token + if clt.asmmemmgr_gcreftracers is None: + clt.asmmemmgr_gcreftracers = [] + return clt.asmmemmgr_gcreftracers def patch_jump_for_descr(self, 
faildescr, adr_new_target): # 'faildescr.adr_jump_offset' is the address of an instruction that is a @@ -996,10 +1027,6 @@ for tok in self.pending_guard_tokens: addr = rawstart + tok.pos_jump_offset # - # POOL - #tok.faildescr.adr_jump_offset = rawstart + \ - # self.pool.pool_start + tok._pool_offset + \ - # RECOVERY_TARGET_POOL_OFFSET tok.faildescr.adr_jump_offset = rawstart + tok.pos_recovery_stub relative_target = tok.pos_recovery_stub - tok.pos_jump_offset # @@ -1199,11 +1226,6 @@ # to be executed, thus remove the first opcode self.mc.b_offset(descr._ll_loop_code + self.mc.LARL_byte_count) else: - # POOL - #offset = self.pool.get_descr_offset(descr) + \ - # JUMPABS_TARGET_ADDR__POOL_OFFSET - #self.mc.LG(r.SCRATCH, l.pool(offset)) - #self.pool.overwrite_64(self.mc, offset, descr._ll_loop_code) self.mc.load_imm(r.SCRATCH, descr._ll_loop_code) self.mc.BCR(c.ANY, r.SCRATCH) @@ -1211,8 +1233,8 @@ def emit_finish(self, op, arglocs, regalloc): base_ofs = self.cpu.get_baseofs_of_frame_field() - if len(arglocs) > 1: - [return_val, fail_descr_loc] = arglocs + if len(arglocs) > 0: + [return_val] = arglocs if op.getarg(0).type == FLOAT: if return_val.is_in_pool(): self.mc.LDY(r.FP_SCRATCH, return_val) @@ -1223,8 +1245,6 @@ self.mc.LG(r.SCRATCH, return_val) return_val = r.SCRATCH self.mc.STG(return_val, l.addr(base_ofs, r.SPP)) - else: - [fail_descr_loc] = arglocs ofs = self.cpu.get_ofs_of_frame_field('jf_descr') ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') @@ -1247,7 +1267,9 @@ gcmap = lltype.nullptr(jitframe.GCMAP) self.load_gcmap(self.mc, r.r9, gcmap) - self.mc.load_imm(r.r10, fail_descr_loc.getint()) + descr = op.getdescr() + faildescrindex = self.get_gcref_from_faildescr(descr) + self.load_gcref_into(r.r10, faildescrindex) self.mc.STG(r.r9, l.addr(ofs2, r.SPP)) self.mc.STG(r.r10, l.addr(ofs, r.SPP)) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ 
b/rpython/jit/backend/zarch/codebuilder.py @@ -31,9 +31,9 @@ class ZARCHGuardToken(GuardToken): def __init__(self, cpu, gcmap, descr, failargs, faillocs, - guard_opnum, frame_depth, fcond=c.cond_none): + guard_opnum, frame_depth, faildescrindex, fcond=c.cond_none): GuardToken.__init__(self, cpu, gcmap, descr, failargs, faillocs, - guard_opnum, frame_depth) + guard_opnum, frame_depth, faildescrindex) self.fcond = fcond class AbstractZARCHBuilder(object): diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -632,12 +632,19 @@ def build_guard_token(self, op, frame_depth, arglocs, fcond): descr = op.getdescr() gcmap = allocate_gcmap(self, frame_depth, r.JITFRAME_FIXED_SIZE) + faildescrindex = self.get_gcref_from_faildescr(descr) token = ZARCHGuardToken(self.cpu, gcmap, descr, op.getfailargs(), arglocs, op.getopnum(), frame_depth, - fcond) + faildescrindex, fcond) #token._pool_offset = self.pool.get_descr_offset(descr) return token + def emit_load_from_gc_table(self, op, arglocs, regalloc): + resloc, = arglocs + index = op.getarg(0).getint() + assert resloc.is_reg() + self.load_gcref_into(resloc, index) + def emit_guard_true(self, op, arglocs, regalloc): self._emit_guard(op, arglocs) diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -11,6 +11,9 @@ from rpython.jit.backend.zarch.arch import (WORD, RECOVERY_GCMAP_POOL_OFFSET, RECOVERY_TARGET_POOL_OFFSET) from rpython.rlib.longlong2float import float2longlong +from rpython.jit.metainterp.history import (ConstFloat, + ConstInt, ConstPtr) + class PoolOverflow(Exception): pass @@ -58,14 +61,14 @@ return self.offset_map[uvalue] def unique_value(self, val): - if val.type == FLOAT: + if isinstance(val, ConstFloat): if val.getfloat() == 0.0: return 0 return 
float2longlong(val.getfloat()) - elif val.type == INT: + elif isinstance(val, ConstInt): return rffi.cast(lltype.Signed, val.getint()) else: - assert val.type == REF + assert isinstance(val, ConstPtr) return rffi.cast(lltype.Signed, val.getref_base()) def reserve_literal(self, size, box, asm): @@ -99,21 +102,23 @@ self.size = val assert val >= 0 - def pre_assemble(self, asm, operations, bridge=False): - # O(len(operations)). I do not think there is a way - # around this. - # + def pre_assemble(self, asm, operations, allgcrefs, bridge=False): # Problem: # constants such as floating point operations, plain pointers, # or integers might serve as parameter to an operation. thus - # it must be loaded into a register. There is a space benefit - # for 64-bit integers, or python floats, when a constant is used - # twice. + # it must be loaded into a register. Loading them from immediate + # takes quite long and slows down the resulting JIT code. + # There is a space benefit for 64-bit integers/doubles used twice. # - # Solution: - # the current solution (gcc does the same), use a literal pool - # located at register r13. 
This one can easily offset with 20 - # bit signed values (should be enough) + # creates the table for gc references here + self.gc_table_addr = asm.mc.get_relative_pos() + self.gcref_table_size = len(allgcrefs) * WORD + mc = asm.mc + assert mc.get_relative_pos() == 0 + for i in range(self.gcref_table_size): + mc.writechar('\x00') + asm.setup_gcrefs_list(allgcrefs) + self.pool_start = asm.mc.get_relative_pos() for op in operations: self.ensure_can_hold_constants(asm, op) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1215,18 +1215,16 @@ src_locations2, dst_locations2, fptmploc, WORD) return [] + def prepare_load_from_gc_table(self, op): + resloc = self.rm.force_allocate_reg(op) + return [resloc] + def prepare_finish(self, op): - descr = op.getdescr() - fail_descr = cast_instance_to_gcref(descr) - # we know it does not move, but well - rgc._make_sure_does_not_move(fail_descr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) - assert fail_descr > 0 if op.numargs() > 0: loc = self.ensure_reg(op.getarg(0)) - locs = [loc, imm(fail_descr)] + locs = [loc] else: - locs = [imm(fail_descr)] + locs = [] return locs def notimplemented(self, op): diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py --- a/rpython/jit/backend/zarch/test/test_runner.py +++ b/rpython/jit/backend/zarch/test/test_runner.py @@ -25,5 +25,5 @@ return cpu add_loop_instructions = "lg; lgr; larl; agr; cgfi; jge; j;$" - bridge_loop_instructions = "larl; lg; cgfi; jnl; lghi; " \ - "(lgfi|iilf);( iihf;)? (lgfi|iilf);( iihf;)? basr; (lgfi|iilf);( iihf;)? br;$" + bridge_loop_instructions = "lg; cgfi; jnl; lghi; " \ + "(lgfi|iilf);( iihf;)? (lgfi|iilf);( iihf;)? basr; larl; (lgfi|iilf);( iihf;)? 
br;$" From pypy.commits at gmail.com Tue Apr 12 11:46:44 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 08:46:44 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-includes: fix merge Message-ID: <570d1864.8bd31c0a.25a49.ffffc4f9@mx.google.com> Author: mattip Branch: cleanup-includes Changeset: r83621:b09a9eaf4abf Date: 2016-04-12 18:35 +0300 http://bitbucket.org/pypy/pypy/changeset/b09a9eaf4abf/ Log: fix merge diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1010,6 +1010,7 @@ for header_name, header_functions in FUNCTIONS_BY_HEADER.iteritems(): if header_name not in decls: + header = decls[header_name] = [] header.append('#define Signed long /* xxx temporary fix */\n') header.append('#define Unsigned unsigned long /* xxx temporary fix */\n') else: From pypy.commits at gmail.com Tue Apr 12 12:11:16 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 12 Apr 2016 09:11:16 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Use path_or_fd unwrapper in utime() Message-ID: <570d1e24.ca941c0a.47ac7.658f@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83622:901c899d400f Date: 2016-04-12 17:10 +0100 http://bitbucket.org/pypy/pypy/changeset/901c899d400f/ Log: Use path_or_fd unwrapper in utime() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1302,9 +1302,11 @@ return space.wrap(ret) - at unwrap_spec(w_times=WrappedDefault(None), w_ns=kwonly(WrappedDefault(None)), + at unwrap_spec( + path=path_or_fd, + w_times=WrappedDefault(None), w_ns=kwonly(WrappedDefault(None)), dir_fd=DirFD(rposix.HAVE_UTIMENSAT), follow_symlinks=kwonly(bool)) -def utime(space, w_path, w_times, w_ns, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): +def utime(space, path, w_times, w_ns, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """utime(path, 
times=None, *, ns=None, dir_fd=None, follow_symlinks=True) Set the access and modified time of path. @@ -1355,19 +1357,22 @@ mtime_s, mtime_ns = convert_ns(space, args_w[1]) if rposix.HAVE_UTIMENSAT: - path = space.fsencode_w(w_path) + path_b = path.as_bytes + if path_b is None: + raise oefmt(space.w_NotImplementedError, + "utime: unsupported value for 'path'") try: if utime_now: rposix.utimensat( - path, 0, rposix.UTIME_NOW, 0, rposix.UTIME_NOW, + path_b, 0, rposix.UTIME_NOW, 0, rposix.UTIME_NOW, dir_fd=dir_fd, follow_symlinks=follow_symlinks) else: rposix.utimensat( - path, atime_s, atime_ns, mtime_s, mtime_ns, + path_b, atime_s, atime_ns, mtime_s, mtime_ns, dir_fd=dir_fd, follow_symlinks=follow_symlinks) return except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror(space, e) if not follow_symlinks: raise argument_unavailable(space, "utime", "follow_symlinks") @@ -1377,10 +1382,15 @@ "utime: 'ns' unsupported on this platform on PyPy") if utime_now: try: - dispatch_filename(rposix.utime, 1)(space, w_path, None) + if path.as_unicode is not None: + rposix.utime(path.as_unicode, None) + else: + path_b = path.as_bytes + assert path_b is not None + rposix.utime(path.as_bytes, None) return - except OSError, e: - raise wrap_oserror2(space, e, w_path) + except OSError as e: + raise wrap_oserror(space, e) try: msg = "utime() arg 2 must be a tuple (atime, mtime) or None" args_w = space.fixedview(w_times) @@ -1388,13 +1398,19 @@ raise OperationError(space.w_TypeError, space.wrap(msg)) actime = space.float_w(args_w[0], allow_conversion=False) modtime = space.float_w(args_w[1], allow_conversion=False) - dispatch_filename(rposix.utime, 2)(space, w_path, (actime, modtime)) - except OSError, e: - raise wrap_oserror2(space, e, w_path) except OperationError, e: if not e.match(space, space.w_TypeError): raise raise OperationError(space.w_TypeError, space.wrap(msg)) + try: + if path.as_unicode is not None: + rposix.utime(path.as_unicode, (actime, modtime)) 
+ else: + path_b = path.as_bytes + assert path_b is not None + rposix.utime(path_b, (actime, modtime)) + except OSError as e: + raise wrap_oserror(space, e) def convert_seconds(space, w_time): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -250,7 +250,7 @@ try: self.posix.utime('qowieuqw/oeiu', arg) except OSError as e: - assert e.filename == 'qowieuqw/oeiu' + pass else: assert 0 From pypy.commits at gmail.com Tue Apr 12 12:50:46 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 12 Apr 2016 09:50:46 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Enable fd support in utime() Message-ID: <570d2766.c3941c0a.2733a.fffffb64@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83623:af34944fdc16 Date: 2016-04-12 17:49 +0100 http://bitbucket.org/pypy/pypy/changeset/af34944fdc16/ Log: Enable fd support in utime() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -120,7 +120,7 @@ self.as_bytes = bytes self.as_unicode = unicode -class path_or_fd(Unwrapper): +class _PathOrFd(Unwrapper): def unwrap(self, space, w_value): if _WIN32: try: @@ -139,6 +139,23 @@ fd = unwrap_fd(space, w_value) return Path(fd, None, None) +class _JustPath(Unwrapper): + def unwrap(self, space, w_value): + if _WIN32: + try: + path_u = space.unicode_w(w_value) + return Path(-1, None, path_u) + except OperationError: + pass + try: + path_b = space.fsencode_w(w_value) + return Path(-1, path_b, None) + except OperationError: + raise oefmt(space.w_TypeError, "illegal type for path parameter") + +def path_or_fd(allow_fd=True): + return _PathOrFd if allow_fd else _JustPath + if hasattr(rposix, 'AT_FDCWD'): DEFAULT_DIR_FD = rposix.AT_FDCWD @@ -1303,7 +1320,7 @@ @unwrap_spec( - path=path_or_fd, + 
path=path_or_fd(allow_fd=rposix.HAVE_FUTIMENS), w_times=WrappedDefault(None), w_ns=kwonly(WrappedDefault(None)), dir_fd=DirFD(rposix.HAVE_UTIMENSAT), follow_symlinks=kwonly(bool)) def utime(space, path, w_times, w_ns, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): @@ -1356,6 +1373,21 @@ atime_s, atime_ns = convert_ns(space, args_w[0]) mtime_s, mtime_ns = convert_ns(space, args_w[1]) + if path.as_fd != -1: + if dir_fd != DEFAULT_DIR_FD: + raise oefmt(space.w_ValueError, + "utime: can't specify both dir_fd and fd") + if not follow_symlinks: + raise oefmt(space.w_ValueError, + "utime: cannot use fd and follow_symlinks together") + if utime_now: + atime_ns = mtime_ns = rposix.UTIME_NOW + try: + rposix.futimens(path.as_fd, atime_s, atime_ns, mtime_s, mtime_ns) + return + except OSError as e: + raise wrap_oserror(space, e) + if rposix.HAVE_UTIMENSAT: path_b = path.as_bytes if path_b is None: From pypy.commits at gmail.com Tue Apr 12 13:31:54 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Apr 2016 10:31:54 -0700 (PDT) Subject: [pypy-commit] pypy fix-jitlog: close branch Message-ID: <570d310a.aa5ec20a.1a60a.ffffc0ad@mx.google.com> Author: Armin Rigo Branch: fix-jitlog Changeset: r83624:12c9d7818122 Date: 2016-04-12 19:30 +0200 http://bitbucket.org/pypy/pypy/changeset/12c9d7818122/ Log: close branch From pypy.commits at gmail.com Tue Apr 12 13:31:56 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Apr 2016 10:31:56 -0700 (PDT) Subject: [pypy-commit] pypy default: (edd, palecsandru): Fixed the shown offset values in the jitlog, add a test Message-ID: <570d310c.0173c20a.d3d86.ffffd3e3@mx.google.com> Author: Armin Rigo Branch: Changeset: r83625:9a50e305bb7c Date: 2016-04-12 19:31 +0200 http://bitbucket.org/pypy/pypy/changeset/9a50e305bb7c/ Log: (edd, palecsandru): Fixed the shown offset values in the jitlog, add a test diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ 
b/rpython/jit/metainterp/logger.py @@ -195,7 +195,8 @@ if ops_offset is None: offset = -1 else: - offset = ops_offset.get(op, -1) + final_op = op.get_box_replacement() + offset = ops_offset.get(final_op, -1) if offset == -1: s_offset = "" else: diff --git a/rpython/jit/metainterp/test/test_logger.py b/rpython/jit/metainterp/test/test_logger.py --- a/rpython/jit/metainterp/test/test_logger.py +++ b/rpython/jit/metainterp/test/test_logger.py @@ -242,3 +242,51 @@ +30: jump(i4) +40: --end of the loop-- """.strip() + + def test_ops_offset_with_forward(self): + inp = ''' + [i0] + i1 = int_add(i0, 4) + i2 = int_mul(i0, 8) + jump(i2) + ''' + loop = pure_parse(inp) + ops = loop.operations + + # again to get new ops with different identities to existing ones + loop2 = pure_parse(inp) + ops2 = loop.operations + + # Suppose a re-write occurs which replaces the operations with these. + # The add 4 became a sub -4. The others are the same, but have a + # different address, thus still require forwarding. 
+ inp2 = ''' + [i0] + i1 = int_sub(i0, -4) + i2 = int_mul(i0, 8) + jump(i2) + ''' + loop2 = pure_parse(inp2) + ops2 = loop2.operations + + # Add forwarding + for i in xrange(3): + ops[i].set_forwarded(ops2[i]) + + # So the offsets are keyed by ops2 instances + ops_offset = { + ops2[0]: 10, + ops2[1]: 20, + ops2[2]: 30, + None: 40 + } + + logger = Logger(self.make_metainterp_sd()) + output = logger.log_loop(loop, ops_offset=ops_offset, name="foo") + + # The logger should have followed the forwarding pointers + lines = output.strip().splitlines() + assert lines[2].startswith("+10") + assert lines[3].startswith("+20") + assert lines[4].startswith("+30") + assert lines[5].startswith("+40") From pypy.commits at gmail.com Tue Apr 12 13:31:57 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Apr 2016 10:31:57 -0700 (PDT) Subject: [pypy-commit] pypy default: mention branch Message-ID: <570d310d.e7bec20a.ac70.ffffc1ce@mx.google.com> Author: Armin Rigo Branch: Changeset: r83626:883c6bcbaacf Date: 2016-04-12 19:31 +0200 http://bitbucket.org/pypy/pypy/changeset/883c6bcbaacf/ Log: mention branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -52,3 +52,5 @@ the ConstPtrs in a separate table, and loading from the table. It gives improved warm-up time and memory usage, and also removes annoying special-purpose code for pinned pointers. + +.. 
branch: fix-jitlog From pypy.commits at gmail.com Tue Apr 12 15:19:12 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 12 Apr 2016 12:19:12 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Merged in marky1991/pypy_new/33_fix_itertools (pull request #424) Message-ID: <570d4a30.4a231c0a.67b73.2e80@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83634:255a852e6292 Date: 2016-04-12 20:18 +0100 http://bitbucket.org/pypy/pypy/changeset/255a852e6292/ Log: Merged in marky1991/pypy_new/33_fix_itertools (pull request #424) Py3k: Fix itertools tests diff --git a/lib-python/3/test/test_itertools.py b/lib-python/3/test/test_itertools.py --- a/lib-python/3/test/test_itertools.py +++ b/lib-python/3/test/test_itertools.py @@ -1728,6 +1728,7 @@ class LengthTransparency(unittest.TestCase): + @support.impl_detail("__length_hint__() API is undocumented") def test_repeat(self): from test.test_iterlen import len self.assertEqual(len(repeat(None, 50)), 50) diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -40,6 +40,7 @@ 'cycle' : 'interp_itertools.W_Cycle', 'dropwhile' : 'interp_itertools.W_DropWhile', 'groupby' : 'interp_itertools.W_GroupBy', + '_groupby' : 'interp_itertools.W_GroupByIterator', 'filterfalse' : 'interp_itertools.W_FilterFalse', 'islice' : 'interp_itertools.W_ISlice', 'permutations' : 'interp_itertools.W_Permutations', diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -114,6 +114,15 @@ else: s = 'repeat(%s)' % (objrepr,) return self.space.wrap(s) + + def descr_reduce(self): + space = self.space + if self.counting: + args_w = [self.w_obj, space.wrap(self.count)] + else: + args_w = [self.w_obj] + return space.newtuple([space.gettypefor(W_Repeat), + space.newtuple(args_w)]) def W_Repeat___new__(space, 
w_subtype, w_object, w_times=None): r = space.allocate_instance(W_Repeat, w_subtype) @@ -127,6 +136,7 @@ __length_hint__ = interp2app(W_Repeat.length_w), __next__ = interp2app(W_Repeat.next_w), __repr__ = interp2app(W_Repeat.repr_w), + __reduce__ = interp2app(W_Repeat.descr_reduce), __doc__ = """Make an iterator that returns object over and over again. Runs indefinitely unless the times argument is specified. Used as argument to imap() for invariant parameters to the called @@ -268,6 +278,10 @@ class W_FilterFalse(W_Filter): reverse = True + def descr_reduce(self, space): + args_w = [space.w_None if self.no_predicate else self.w_predicate, + self.iterable] + return space.newtuple([space.type(self), space.newtuple(args_w)]) def W_FilterFalse___new__(space, w_subtype, w_predicate, w_iterable): r = space.allocate_instance(W_FilterFalse, w_subtype) @@ -279,6 +293,7 @@ __new__ = interp2app(W_FilterFalse___new__), __iter__ = interp2app(W_FilterFalse.iter_w), __next__ = interp2app(W_FilterFalse.next_w), + __reduce__ = interp2app(W_FilterFalse.descr_reduce), __doc__ = """Make an iterator that filters elements from iterable returning only those for which the predicate is False. If predicate is None, return the items that are false. 
@@ -448,11 +463,16 @@ return self.space.wrap(self) def _advance(self): + if self.w_iterables is None: + raise OperationError(self.space.w_StopIteration, self.space.w_None) self.w_it = self.space.iter(self.space.next(self.w_iterables)) def next_w(self): if not self.w_it: - self._advance() + try: + self._advance() + except OperationError, e: + raise e try: return self.space.next(self.w_it) except OperationError, e: @@ -462,12 +482,45 @@ while True: if not e.match(self.space, self.space.w_StopIteration): raise e - self._advance() # may raise StopIteration itself + try: + self._advance() # may raise StopIteration itself + except OperationError, e: + self.w_iterables = None + raise e try: return self.space.next(self.w_it) except OperationError, e: pass # loop back to the start of _handle_error(e) + def descr_reduce(self, space): + if self.w_iterables is not None: + if self.w_it is not None: + inner_contents = [self.w_iterables, self.w_it] + else: + inner_contents = [self.w_iterables] + result_w = [space.type(self), + space.newtuple([]), + space.newtuple(inner_contents)] + else: + result_w = [space.type(self), + space.newtuple([])] + return space.newtuple(result_w) + def descr_setstate(self, space, w_state): + state = space.unpackiterable(w_state) + num_args = len(state) + if num_args < 1: + raise OperationError(space.w_TypeError, + space.wrap("function takes at least 1 argument " + "(" + str(num_args) + " given)")) + elif num_args == 1: + self.w_iterables = state[0] + elif num_args == 2: + self.w_iterables, self.w_it = state + else: + raise OperationError(space.w_TypeError, + space.wrap("function takes at most 2 arguments " + "(" + str(num_args) + " given)")) + def W_Chain___new__(space, w_subtype, args_w): r = space.allocate_instance(W_Chain, w_subtype) w_args = space.newtuple(args_w) @@ -488,6 +541,8 @@ __new__ = interp2app(W_Chain___new__), __iter__ = interp2app(W_Chain.iter_w), __next__ = interp2app(W_Chain.next_w), + __reduce__ = interp2app(W_Chain.descr_reduce), + 
__setstate__ = interp2app(W_Chain.descr_setstate), from_iterable = interp2app(chain_from_iterable, as_classmethod=True), __doc__ = """Make an iterator that returns elements from the first iterable until it is exhausted, then proceeds to the next iterable, until @@ -538,6 +593,23 @@ raise OperationError(self.space.w_StopIteration, self.space.w_None) return [self._fetch(index) for index in range(nb)] + def descr_reduce(self, space): + result_w = [space.type(self)] + + if self.iterators_w is not None: + iterators = [iterator if iterator is not None else space.newtuple([]) + for iterator in self.iterators_w] + iterators = space.newtuple(iterators) + else: + iterators = space.newtuple([]) + result_w = [space.type(self), + iterators, + self.w_fillvalue] + + return space.newtuple(result_w) + def descr_setstate(self, space, w_state): + self.w_fillvalue = w_state + def W_ZipLongest___new__(space, w_subtype, __args__): arguments_w, kwds_w = __args__.unpack() w_fillvalue = space.w_None @@ -561,6 +633,8 @@ __new__ = interp2app(W_ZipLongest___new__), __iter__ = interp2app(W_ZipLongest.iter_w), __next__ = interp2app(W_ZipLongest.next_w), + __reduce__ = interp2app(W_ZipLongest.descr_reduce), + __setstate__ = interp2app(W_ZipLongest.descr_setstate), __doc__ = """Return a zip_longest object whose .next() method returns a tuple where the i-th element comes from the i-th iterable argument. 
The .next() method continues until the longest iterable in the argument sequence @@ -669,6 +743,13 @@ w_obj = self.space.next(self.w_iterable) return self.space.call(self.w_fun, w_obj) + def descr_reduce(self): + return self.space.newtuple([self.space.gettypefor(W_StarMap), + self.space.newtuple([ + self.w_fun, + self.w_iterable]) + ]) + def W_StarMap___new__(space, w_subtype, w_fun, w_iterable): r = space.allocate_instance(W_StarMap, w_subtype) r.__init__(space, w_fun, w_iterable) @@ -679,6 +760,7 @@ __new__ = interp2app(W_StarMap___new__), __iter__ = interp2app(W_StarMap.iter_w), __next__ = interp2app(W_StarMap.next_w), + __reduce__ = interp2app(W_StarMap.descr_reduce), __doc__ = """Make an iterator that computes the function using arguments tuples obtained from the iterable. Used instead of imap() when argument parameters are already grouped in tuples from a single @@ -871,6 +953,37 @@ self.lookahead = True self.new_group = True #new group raise StopIteration + def descr_reduce(self, space): + if self.started: + return space.newtuple([ + space.type(self), + space.newtuple([ + self.w_iterable, + self.w_fun]), + space.newtuple([ + self.w_key, + self.w_lookahead, + self.w_key]) + ]) + else: + return space.newtuple([ + space.type(self), + space.newtuple([ + self.w_iterable, + self.w_fun])]) + def descr_setstate(self, space, w_state): + state = space.unpackiterable(w_state) + num_args = len(state) + if num_args != 3: + raise OperationError(space.w_TypeError, + space.wrap("function takes exactly 3 arguments " + "(" + str(num_args) + " given)")) + w_key, w_lookahead, _ = state + self.w_key = w_key + self.w_lookahead = w_lookahead + if self.w_lookahead: + self.started = True + self.lookahead = True def W_GroupBy___new__(space, w_subtype, w_iterable, w_key=None): r = space.allocate_instance(W_GroupBy, w_subtype) @@ -882,6 +995,8 @@ __new__ = interp2app(W_GroupBy___new__), __iter__ = interp2app(W_GroupBy.iter_w), __next__ = interp2app(W_GroupBy.next_w), + __reduce__ = 
interp2app(W_GroupBy.descr_reduce), + __setstate__ = interp2app(W_GroupBy.descr_setstate), __doc__ = """Make an iterator that returns consecutive keys and groups from the iterable. The key is a function computing a key value for each element. If not specified or is None, key defaults to an identity @@ -924,10 +1039,27 @@ else: return w_obj + def descr_reduce(self, space): + return space.newtuple([ + space.type(self), + space.newtuple([ + space.wrap(self.index), + space.wrap(self.groupby)]), + ]) + +def W_GroupByIterator__new__(space, w_subtype, w_index, w_groupby): + r = space.allocate_instance(W_GroupByIterator, w_subtype) + index = space.int_w(w_index) + groupby = space.interp_w(W_GroupBy, w_groupby) + r.__init__(space, index, groupby) + return space.wrap(r) + W_GroupByIterator.typedef = TypeDef( 'itertools._groupby', + __new__ = interp2app(W_GroupByIterator__new__), __iter__ = interp2app(W_GroupByIterator.iter_w), - __next__ = interp2app(W_GroupByIterator.next_w)) + __next__ = interp2app(W_GroupByIterator.next_w), + __reduce__ = interp2app(W_GroupByIterator.descr_reduce)) W_GroupByIterator.typedef.acceptable_as_base_class = False @@ -988,10 +1120,12 @@ for gear in self.gears: if len(gear) == 0: self.lst = None + self.stopped = True break else: self.indices = [0] * len(self.gears) - self.lst = [gear[0] for gear in self.gears] + self.lst = None + self.stopped = False def _rotate_previous_gears(self): lst = self.lst @@ -1013,10 +1147,16 @@ x -= 1 else: self.lst = None + self.stopped = True def fill_next_result(self): # the last gear is done here, in a function with no loop, # to allow the JIT to look inside + if self.lst is None: + self.lst = [None for gear in self.gears] + for index, gear in enumerate(self.gears): + self.lst[index] = gear[0] + return lst = self.lst x = len(self.gears) - 1 if x >= 0: @@ -1029,18 +1169,50 @@ else: self._rotate_previous_gears() else: - self.lst = None + self.stopped = True def iter_w(self, space): return space.wrap(self) def 
next_w(self, space): - if self.lst is None: + if not self.stopped: + self.fill_next_result() + if self.stopped: raise OperationError(space.w_StopIteration, space.w_None) w_result = space.newtuple(self.lst[:]) - self.fill_next_result() return w_result + def descr_reduce(self, space): + if not self.stopped: + gears = [space.newtuple(gear) for gear in self.gears] + result_w = [ + space.type(self), + space.newtuple(gears) + ] + if self.lst is not None: + result_w = result_w + [ + space.newtuple([ + space.wrap(index) for index in self.indices])] + else: + result_w = [ + space.type(self), + space.newtuple([space.newtuple([])]) + ] + return space.newtuple(result_w) + def descr_setstate(self, space, w_state): + gear_count = len(self.gears) + indices_w = space.unpackiterable(w_state) + lst = [] + for i, gear in enumerate(self.gears): + w_index = indices_w[i] + index = space.int_w(w_index) + if index < 0: + index = 0 + if index > gear_count - 1: + index = gear_count - 1 + self.indices[i] = index + lst.append(gear[self.indices[i]]) + self.lst = lst def W_Product__new__(space, w_subtype, __args__): arguments_w, kwds_w = __args__.unpack() @@ -1062,6 +1234,8 @@ __new__ = interp2app(W_Product__new__), __iter__ = interp2app(W_Product.iter_w), __next__ = interp2app(W_Product.next_w), + __reduce__ = interp2app(W_Product.descr_reduce), + __setstate__ = interp2app(W_Product.descr_setstate), __doc__ = """ Cartesian product of input iterables. 
@@ -1179,7 +1353,7 @@ index = max if index < 0: index = 0 - self.indices.append(index) + self.indices[i] = index self.last_result_w = [ self.pool_w[self.indices[i]] for i in range(self.r)] @@ -1221,6 +1395,41 @@ def max_index(self, j): return self.indices[j - 1] + def descr_reduce(self, space): + if self.stopped: + pool_w = [] + else: + pool_w = self.pool_w + result_w = [ + space.type(self), + space.newtuple([ + space.newtuple(pool_w), space.wrap(self.r) + ])] + if self.last_result_w is not None and not self.stopped: + # we must pickle the indices and use them for setstate + result_w = result_w + [ + space.newtuple([ + space.wrap(index) for index in self.indices])] + return space.newtuple(result_w) + + def descr_setstate(self, space, w_state): + indices_w = space.fixedview(w_state) + if len(indices_w) != self.r: + raise OperationError(space.w_ValueError, space.wrap( + "invalid arguments")) + for i in range(self.r): + index = space.int_w(indices_w[i]) + max = self.get_maximum(i) + # clamp the index (beware of negative max) + if index > max: + index = max + if index < 0: + index = 0 + self.indices[i] = index + self.last_result_w = [ + self.pool_w[self.indices[i]] + for i in range(self.r)] + @unwrap_spec(r=int) def W_CombinationsWithReplacement__new__(space, w_subtype, w_iterable, r): pool_w = space.fixedview(w_iterable) @@ -1237,6 +1446,8 @@ __new__ = interp2app(W_CombinationsWithReplacement__new__), __iter__ = interp2app(W_CombinationsWithReplacement.descr__iter__), __next__ = interp2app(W_CombinationsWithReplacement.descr_next), + __reduce__ = interp2app(W_CombinationsWithReplacement.descr_reduce), + __setstate__ = interp2app(W_CombinationsWithReplacement.descr_setstate), __doc__ = """\ combinations_with_replacement(iterable, r) --> combinations_with_replacement object @@ -1253,17 +1464,19 @@ n = len(pool_w) n_minus_r = n - r if n_minus_r < 0: - self.stopped = True + self.stopped = self.raised_stop_iteration = True else: - self.stopped = False + self.stopped = 
self.raised_stop_iteration = False self.indices = range(n) self.cycles = range(n, n_minus_r, -1) + self.started = False def descr__iter__(self, space): return self def descr_next(self, space): if self.stopped: + self.raised_stop_iteration = True raise OperationError(space.w_StopIteration, space.w_None) r = self.r indices = self.indices @@ -1286,7 +1499,63 @@ indices[n1] = num i -= 1 self.stopped = True + if self.started: + raise OperationError(space.w_StopIteration, space.w_None) + else: + self.started = True return w_result + def descr_reduce(self, space): + if self.raised_stop_iteration: + pool_w = [] + else: + pool_w = self.pool_w + result_w = [ + space.type(self), + space.newtuple([ + space.newtuple(pool_w), space.wrap(self.r) + ])] + if not self.raised_stop_iteration: + # we must pickle the indices and use them for setstate + result_w = result_w + [ + space.newtuple([ + space.newtuple([ + space.wrap(index) for index in self.indices]), + space.newtuple([ + space.wrap(num) for num in self.cycles]), + space.wrap(self.started) + ])] + return space.newtuple(result_w) + def descr_setstate(self, space, w_state): + state = space.unpackiterable(w_state) + if len(state) == 3: + w_indices, w_cycles, w_started = state + indices_w = space.unpackiterable(w_indices) + cycles_w = space.unpackiterable(w_cycles) + self.started = space.bool_w(w_started) + else: + raise OperationError(space.w_ValueError, space.wrap( + "invalid arguments")) + + if len(indices_w) != len(self.pool_w) or len(cycles_w) != self.r: + raise OperationError(space.w_ValueError, space.wrap( + "inavalid arguments")) + + n = len(self.pool_w) + for i in range(n): + index = space.int_w(indices_w[i]) + if index < 0: + index = 0 + elif index > n-1: + index = n-1 + self.indices[i] = index + + for i in range(self.r): + index = space.int_w(cycles_w[i]) + if index < 1: + index = 1 + elif index > n-i: + index = n-i + self.cycles[i] = index def W_Permutations__new__(space, w_subtype, w_iterable, w_r=None): pool_w = 
space.fixedview(w_iterable) @@ -1302,6 +1571,8 @@ __new__ = interp2app(W_Permutations__new__), __iter__ = interp2app(W_Permutations.descr__iter__), __next__ = interp2app(W_Permutations.descr_next), + __reduce__ = interp2app(W_Permutations.descr_reduce), + __setstate__ = interp2app(W_Permutations.descr_setstate), __doc__ = """\ permutations(iterable[, r]) --> permutations object @@ -1315,7 +1586,7 @@ def __init__(self, space, w_iterable, w_func=None): self.space = space self.w_iterable = w_iterable - self.w_func = w_func + self.w_func = w_func if not space.is_w(w_func, space.w_None) else None self.w_total = None def iter_w(self): @@ -1333,15 +1604,15 @@ self.w_total = self.space.call_function(self.w_func, self.w_total, w_value) return self.w_total - def reduce_w(self): + def descr_reduce(self): space = self.space w_total = space.w_None if self.w_total is None else self.w_total w_func = space.w_None if self.w_func is None else self.w_func return space.newtuple([space.gettypefor(W_Accumulate), space.newtuple([self.w_iterable, w_func]), w_total]) - def setstate_w(self, w_total): - self.w_total = w_total + def descr_setstate(self, space, w_state): + self.w_total = w_state if not space.is_w(w_state, space.w_None) else None def W_Accumulate__new__(space, w_subtype, w_iterable, w_func=None): r = space.allocate_instance(W_Accumulate, w_subtype) @@ -1352,8 +1623,8 @@ __new__ = interp2app(W_Accumulate__new__), __iter__ = interp2app(W_Accumulate.iter_w), __next__ = interp2app(W_Accumulate.next_w), - __reduce__ = interp2app(W_Accumulate.reduce_w), - __setstate__ = interp2app(W_Accumulate.setstate_w), + __reduce__ = interp2app(W_Accumulate.descr_reduce), + __setstate__ = interp2app(W_Accumulate.descr_setstate), __doc__ = """\ "accumulate(iterable) --> accumulate object From pypy.commits at gmail.com Tue Apr 12 15:19:30 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Apr 2016 12:19:30 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Work on adding pickling 
support for the itertools classes. (Doesn't work yet for product) Message-ID: <570d4a42.03dd1c0a.3dcc0.36f6@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r83627:f5e53a56f323 Date: 2016-03-13 10:57 -0400 http://bitbucket.org/pypy/pypy/changeset/f5e53a56f323/ Log: Work on adding pickling support for the itertools classes. (Doesn't work yet for product) diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -268,6 +268,10 @@ class W_FilterFalse(W_Filter): reverse = True + def descr_reduce(self, space): + args_w = [space.w_None if self.no_predicate else self.w_predicate, + self.iterable] + return space.newtuple([space.type(self), space.newtuple(args_w)]) def W_FilterFalse___new__(space, w_subtype, w_predicate, w_iterable): r = space.allocate_instance(W_FilterFalse, w_subtype) @@ -279,6 +283,7 @@ __new__ = interp2app(W_FilterFalse___new__), __iter__ = interp2app(W_FilterFalse.iter_w), __next__ = interp2app(W_FilterFalse.next_w), + __reduce__ = interp2app(W_FilterFalse.descr_reduce), __doc__ = """Make an iterator that filters elements from iterable returning only those for which the predicate is False. If predicate is None, return the items that are false. 
@@ -871,6 +876,25 @@ self.lookahead = True self.new_group = True #new group raise StopIteration + def descr_reduce(self, space): + if self.started: + return space.newtuple([ + space.type(self), + space.newtuple([ + self.w_iterable, + self.w_fun]), + space.newtuple([ + self.w_key, + self.w_lookahead, + self.w_key]) + ]) + else: + return space.newtuple([ + space.type(self), + space.newtuple([ + self.w_iterable, + self.w_fun])]) + def W_GroupBy___new__(space, w_subtype, w_iterable, w_key=None): r = space.allocate_instance(W_GroupBy, w_subtype) @@ -882,6 +906,7 @@ __new__ = interp2app(W_GroupBy___new__), __iter__ = interp2app(W_GroupBy.iter_w), __next__ = interp2app(W_GroupBy.next_w), + __reduce__ = interp2app(W_GroupBy.descr_reduce), __doc__ = """Make an iterator that returns consecutive keys and groups from the iterable. The key is a function computing a key value for each element. If not specified or is None, key defaults to an identity @@ -988,15 +1013,19 @@ for gear in self.gears: if len(gear) == 0: self.lst = None + self.stopped = True break else: self.indices = [0] * len(self.gears) + self.previous_indices = [] self.lst = [gear[0] for gear in self.gears] + self.stopped = False def _rotate_previous_gears(self): lst = self.lst x = len(self.gears) - 1 lst[x] = self.gears[x][0] + self.previous_indices = self.indices[:] self.indices[x] = 0 x -= 1 # the outer loop runs as long as a we have a carry @@ -1025,6 +1054,7 @@ if index < len(gear): # no carry: done lst[x] = gear[index] + self.previous_indices = self.indices[:] self.indices[x] = index else: self._rotate_previous_gears() @@ -1036,11 +1066,30 @@ def next_w(self, space): if self.lst is None: + self.stopped = True raise OperationError(space.w_StopIteration, space.w_None) w_result = space.newtuple(self.lst[:]) self.fill_next_result() return w_result + def descr_reduce(self, space): + if not self.stopped: + gears = [space.newtuple([gear]) for gear in self.gears] + result_w = [ + space.type(self), + 
space.newtuple(gears) + #space.newtuple([space.newtuple(gear) for gear in self.gears]) + ] + if self.previous_indices: + result_w = result_w + [ + space.newtuple([ + space.wrap(index) for index in self.previous_indices])] + else: + result_w = [ + space.type(self), + space.newtuple([]) + ] + return space.newtuple(result_w) def W_Product__new__(space, w_subtype, __args__): arguments_w, kwds_w = __args__.unpack() @@ -1062,6 +1111,7 @@ __new__ = interp2app(W_Product__new__), __iter__ = interp2app(W_Product.iter_w), __next__ = interp2app(W_Product.next_w), + __reduce__ = interp2app(W_Product.descr_reduce), __doc__ = """ Cartesian product of input iterables. @@ -1221,6 +1271,23 @@ def max_index(self, j): return self.indices[j - 1] + def descr_reduce(self, space): + if self.stopped: + pool_w = [] + else: + pool_w = self.pool_w + result_w = [ + space.type(self), + space.newtuple([ + space.newtuple(pool_w), space.wrap(self.r) + ])] + if self.last_result_w is not None and not self.stopped: + # we must pickle the indices and use them for setstate + result_w = result_w + [ + space.newtuple([ + space.wrap(index) for index in self.indices])] + return space.newtuple(result_w) + @unwrap_spec(r=int) def W_CombinationsWithReplacement__new__(space, w_subtype, w_iterable, r): pool_w = space.fixedview(w_iterable) @@ -1237,6 +1304,7 @@ __new__ = interp2app(W_CombinationsWithReplacement__new__), __iter__ = interp2app(W_CombinationsWithReplacement.descr__iter__), __next__ = interp2app(W_CombinationsWithReplacement.descr_next), + __reduce__ = interp2app(W_CombinationsWithReplacement.descr_reduce), __doc__ = """\ combinations_with_replacement(iterable, r) --> combinations_with_replacement object From pypy.commits at gmail.com Tue Apr 12 15:19:32 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Apr 2016 12:19:32 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Fixed it in pyinteractive (and fixed a correctness issue), but translation is still broken. 
Message-ID: <570d4a44.46941c0a.999fd.2fa1@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r83628:5d1307ba83ee Date: 2016-03-13 13:31 -0400 http://bitbucket.org/pypy/pypy/changeset/5d1307ba83ee/ Log: Fixed it in pyinteractive (and fixed a correctness issue), but translation is still broken. diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1074,7 +1074,7 @@ def descr_reduce(self, space): if not self.stopped: - gears = [space.newtuple([gear]) for gear in self.gears] + gears = [space.newtuple(gear) for gear in self.gears] result_w = [ space.type(self), space.newtuple(gears) @@ -1087,7 +1087,7 @@ else: result_w = [ space.type(self), - space.newtuple([]) + space.newtuple([()]) ] return space.newtuple(result_w) From pypy.commits at gmail.com Tue Apr 12 15:19:34 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Apr 2016 12:19:34 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Fixed translation. Message-ID: <570d4a46.654fc20a.4179b.ffffe834@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r83629:fefbe7b206cb Date: 2016-03-13 18:57 -0400 http://bitbucket.org/pypy/pypy/changeset/fefbe7b206cb/ Log: Fixed translation. 
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1007,7 +1007,7 @@ class W_Product(W_Root): def __init__(self, space, args_w, w_repeat): self.gears = [ - space.unpackiterable(arg_w) for arg_w in args_w + space.unpackiterable(arg_w)[:] for arg_w in args_w ] * space.int_w(w_repeat) # for gear in self.gears: @@ -1087,7 +1087,7 @@ else: result_w = [ space.type(self), - space.newtuple([()]) + space.newtuple([space.newtuple([])]) ] return space.newtuple(result_w) From pypy.commits at gmail.com Tue Apr 12 15:19:35 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Apr 2016 12:19:35 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Fix itertools failures. Message-ID: <570d4a47.4e301c0a.63d24.38ef@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r83630:5b4752980288 Date: 2016-04-10 01:12 -0400 http://bitbucket.org/pypy/pypy/changeset/5b4752980288/ Log: Fix itertools failures. 
diff --git a/lib-python/3/test/test_itertools.py b/lib-python/3/test/test_itertools.py --- a/lib-python/3/test/test_itertools.py +++ b/lib-python/3/test/test_itertools.py @@ -1728,6 +1728,7 @@ class LengthTransparency(unittest.TestCase): + @support.impl_detail("__length_hint__() API is undocumented") def test_repeat(self): from test.test_iterlen import len self.assertEqual(len(repeat(None, 50)), 50) diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -40,6 +40,7 @@ 'cycle' : 'interp_itertools.W_Cycle', 'dropwhile' : 'interp_itertools.W_DropWhile', 'groupby' : 'interp_itertools.W_GroupBy', + '_groupby' : 'interp_itertools.W_GroupByIterator', 'filterfalse' : 'interp_itertools.W_FilterFalse', 'islice' : 'interp_itertools.W_ISlice', 'permutations' : 'interp_itertools.W_Permutations', diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -114,6 +114,15 @@ else: s = 'repeat(%s)' % (objrepr,) return self.space.wrap(s) + + def descr_reduce(self): + space = self.space + if self.counting: + args_w = [self.w_obj, space.wrap(self.count)] + else: + args_w = [self.w_obj] + return space.newtuple([space.gettypefor(W_Repeat), + space.newtuple(args_w)]) def W_Repeat___new__(space, w_subtype, w_object, w_times=None): r = space.allocate_instance(W_Repeat, w_subtype) @@ -127,6 +136,7 @@ __length_hint__ = interp2app(W_Repeat.length_w), __next__ = interp2app(W_Repeat.next_w), __repr__ = interp2app(W_Repeat.repr_w), + __reduce__ = interp2app(W_Repeat.descr_reduce), __doc__ = """Make an iterator that returns object over and over again. Runs indefinitely unless the times argument is specified. 
Used as argument to imap() for invariant parameters to the called @@ -453,11 +463,16 @@ return self.space.wrap(self) def _advance(self): + if self.w_iterables is None: + raise OperationError(self.space.w_StopIteration, self.space.w_None) self.w_it = self.space.iter(self.space.next(self.w_iterables)) def next_w(self): if not self.w_it: - self._advance() + try: + self._advance() + except OperationError, e: + raise e try: return self.space.next(self.w_it) except OperationError, e: @@ -467,12 +482,45 @@ while True: if not e.match(self.space, self.space.w_StopIteration): raise e - self._advance() # may raise StopIteration itself + try: + self._advance() # may raise StopIteration itself + except OperationError, e: + self.w_iterables = None + raise e try: return self.space.next(self.w_it) except OperationError, e: pass # loop back to the start of _handle_error(e) + def descr_reduce(self, space): + if self.w_iterables is not None: + if self.w_it is not None: + inner_contents = [self.w_iterables, self.w_it] + else: + inner_contents = [self.w_iterables] + result_w = [space.type(self), + space.newtuple([]), + space.newtuple(inner_contents)] + else: + result_w = [space.type(self), + space.newtuple([])] + return space.newtuple(result_w) + def descr_setstate(self, space, w_state): + state = space.unpackiterable(w_state) + num_args = len(state) + if num_args < 1: + raise OperationError(space.w_TypeError, + space.wrap("function takes at least 1 argument " + "(" + str(num_args) + " given)")) + elif num_args == 1: + self.w_iterables = state[0] + elif num_args == 2: + self.w_iterables, self.w_it = state + else: + raise OperationError(space.w_TypeError, + space.wrap("function takes at most 2 arguments " + "(" + str(num_args) + " given)")) + def W_Chain___new__(space, w_subtype, args_w): r = space.allocate_instance(W_Chain, w_subtype) w_args = space.newtuple(args_w) @@ -493,6 +541,8 @@ __new__ = interp2app(W_Chain___new__), __iter__ = interp2app(W_Chain.iter_w), __next__ = 
interp2app(W_Chain.next_w), + __reduce__ = interp2app(W_Chain.descr_reduce), + __setstate__ = interp2app(W_Chain.descr_setstate), from_iterable = interp2app(chain_from_iterable, as_classmethod=True), __doc__ = """Make an iterator that returns elements from the first iterable until it is exhausted, then proceeds to the next iterable, until @@ -543,6 +593,23 @@ raise OperationError(self.space.w_StopIteration, self.space.w_None) return [self._fetch(index) for index in range(nb)] + def descr_reduce(self, space): + result_w = [space.type(self)] + + if self.iterators_w is not None: + iterators = [iterator if iterator is not None else space.newtuple([]) + for iterator in self.iterators_w] + iterators = space.newtuple(iterators) + else: + iterators = space.newtuple([]) + result_w = [space.type(self), + iterators, + self.w_fillvalue] + + return space.newtuple(result_w) + def descr_setstate(self, space, w_state): + self.w_fillvalue = w_state + def W_ZipLongest___new__(space, w_subtype, __args__): arguments_w, kwds_w = __args__.unpack() w_fillvalue = space.w_None @@ -566,6 +633,8 @@ __new__ = interp2app(W_ZipLongest___new__), __iter__ = interp2app(W_ZipLongest.iter_w), __next__ = interp2app(W_ZipLongest.next_w), + __reduce__ = interp2app(W_ZipLongest.descr_reduce), + __setstate__ = interp2app(W_ZipLongest.descr_setstate), __doc__ = """Return a zip_longest object whose .next() method returns a tuple where the i-th element comes from the i-th iterable argument. 
The .next() method continues until the longest iterable in the argument sequence @@ -674,6 +743,13 @@ w_obj = self.space.next(self.w_iterable) return self.space.call(self.w_fun, w_obj) + def descr_reduce(self): + return self.space.newtuple([self.space.gettypefor(W_StarMap), + self.space.newtuple([ + self.w_fun, + self.w_iterable]) + ]) + def W_StarMap___new__(space, w_subtype, w_fun, w_iterable): r = space.allocate_instance(W_StarMap, w_subtype) r.__init__(space, w_fun, w_iterable) @@ -684,6 +760,7 @@ __new__ = interp2app(W_StarMap___new__), __iter__ = interp2app(W_StarMap.iter_w), __next__ = interp2app(W_StarMap.next_w), + __reduce__ = interp2app(W_StarMap.descr_reduce), __doc__ = """Make an iterator that computes the function using arguments tuples obtained from the iterable. Used instead of imap() when argument parameters are already grouped in tuples from a single @@ -894,7 +971,19 @@ space.newtuple([ self.w_iterable, self.w_fun])]) - + def descr_setstate(self, space, w_state): + state = space.unpackiterable(w_state) + num_args = len(state) + if num_args != 3: + raise OperationError(space.w_TypeError, + space.wrap("function takes exactly 3 arguments " + "(" + str(num_args) + " given)")) + w_key, w_lookahead, _ = state + self.w_key = w_key + self.w_lookahead = w_lookahead + if self.w_lookahead: + self.started = True + self.lookahead = True def W_GroupBy___new__(space, w_subtype, w_iterable, w_key=None): r = space.allocate_instance(W_GroupBy, w_subtype) @@ -907,6 +996,7 @@ __iter__ = interp2app(W_GroupBy.iter_w), __next__ = interp2app(W_GroupBy.next_w), __reduce__ = interp2app(W_GroupBy.descr_reduce), + __setstate__ = interp2app(W_GroupBy.descr_setstate), __doc__ = """Make an iterator that returns consecutive keys and groups from the iterable. The key is a function computing a key value for each element. 
If not specified or is None, key defaults to an identity @@ -949,10 +1039,27 @@ else: return w_obj + def descr_reduce(self, space): + return space.newtuple([ + space.type(self), + space.newtuple([ + space.wrap(self.index), + space.wrap(self.groupby)]), + ]) + +def W_GroupByIterator__new__(space, w_subtype, w_index, w_groupby): + r = space.allocate_instance(W_GroupByIterator, w_subtype) + index = space.int_w(w_index) + groupby = space.interp_w(W_GroupBy, w_groupby) + r.__init__(space, index, groupby) + return space.wrap(r) + W_GroupByIterator.typedef = TypeDef( 'itertools._groupby', + __new__ = interp2app(W_GroupByIterator__new__), __iter__ = interp2app(W_GroupByIterator.iter_w), - __next__ = interp2app(W_GroupByIterator.next_w)) + __next__ = interp2app(W_GroupByIterator.next_w), + __reduce__ = interp2app(W_GroupByIterator.descr_reduce)) W_GroupByIterator.typedef.acceptable_as_base_class = False @@ -1018,7 +1125,7 @@ else: self.indices = [0] * len(self.gears) self.previous_indices = [] - self.lst = [gear[0] for gear in self.gears] + self.lst = None self.stopped = False def _rotate_previous_gears(self): @@ -1042,10 +1149,16 @@ x -= 1 else: self.lst = None + self.stopped = True def fill_next_result(self): # the last gear is done here, in a function with no loop, # to allow the JIT to look inside + if self.lst is None: + self.lst = [None for gear in self.gears] + for index, gear in enumerate(self.gears): + self.lst[index] = gear[0] + return lst = self.lst x = len(self.gears) - 1 if x >= 0: @@ -1059,17 +1172,17 @@ else: self._rotate_previous_gears() else: - self.lst = None + self.stopped = True def iter_w(self, space): return space.wrap(self) def next_w(self, space): - if self.lst is None: - self.stopped = True + if not self.stopped: + self.fill_next_result() + if self.stopped: raise OperationError(space.w_StopIteration, space.w_None) w_result = space.newtuple(self.lst[:]) - self.fill_next_result() return w_result def descr_reduce(self, space): @@ -1080,16 +1193,30 @@ 
space.newtuple(gears) #space.newtuple([space.newtuple(gear) for gear in self.gears]) ] - if self.previous_indices: + if self.lst is not None: result_w = result_w + [ - space.newtuple([ - space.wrap(index) for index in self.previous_indices])] + space.newtuple([ + space.wrap(index) for index in self.indices])] else: result_w = [ space.type(self), space.newtuple([space.newtuple([])]) ] return space.newtuple(result_w) + def descr_setstate(self, space, w_state): + gear_count = len(self.gears) + indices_w = space.unpackiterable(w_state) + lst = [] + for i, gear in enumerate(self.gears): + w_index = indices_w[i] + index = space.int_w(w_index) + if index < 0: + index = 0 + if index > gear_count - 1: + index = gear_count - 1 + self.indices[i] = index + lst.append(gear[self.indices[i]]) + self.lst = lst def W_Product__new__(space, w_subtype, __args__): arguments_w, kwds_w = __args__.unpack() @@ -1112,6 +1239,7 @@ __iter__ = interp2app(W_Product.iter_w), __next__ = interp2app(W_Product.next_w), __reduce__ = interp2app(W_Product.descr_reduce), + __setstate__ = interp2app(W_Product.descr_setstate), __doc__ = """ Cartesian product of input iterables. 
@@ -1229,7 +1357,7 @@ index = max if index < 0: index = 0 - self.indices.append(index) + self.indices[i] = index self.last_result_w = [ self.pool_w[self.indices[i]] for i in range(self.r)] @@ -1288,6 +1416,24 @@ space.wrap(index) for index in self.indices])] return space.newtuple(result_w) + def descr_setstate(self, space, w_state): + indices_w = space.fixedview(w_state) + if len(indices_w) != self.r: + raise OperationError(space.w_ValueError, space.wrap( + "invalid arguments")) + for i in range(self.r): + index = space.int_w(indices_w[i]) + max = self.get_maximum(i) + # clamp the index (beware of negative max) + if index > max: + index = max + if index < 0: + index = 0 + self.indices[i] = index + self.last_result_w = [ + self.pool_w[self.indices[i]] + for i in range(self.r)] + @unwrap_spec(r=int) def W_CombinationsWithReplacement__new__(space, w_subtype, w_iterable, r): pool_w = space.fixedview(w_iterable) @@ -1305,6 +1451,7 @@ __iter__ = interp2app(W_CombinationsWithReplacement.descr__iter__), __next__ = interp2app(W_CombinationsWithReplacement.descr_next), __reduce__ = interp2app(W_CombinationsWithReplacement.descr_reduce), + __setstate__ = interp2app(W_CombinationsWithReplacement.descr_setstate), __doc__ = """\ combinations_with_replacement(iterable, r) --> combinations_with_replacement object @@ -1321,17 +1468,19 @@ n = len(pool_w) n_minus_r = n - r if n_minus_r < 0: - self.stopped = True + self.stopped = self.raised_stop_iteration = True else: - self.stopped = False + self.stopped = self.raised_stop_iteration = False self.indices = range(n) self.cycles = range(n, n_minus_r, -1) + self.started = False def descr__iter__(self, space): return self def descr_next(self, space): if self.stopped: + self.raised_stop_iteration = True raise OperationError(space.w_StopIteration, space.w_None) r = self.r indices = self.indices @@ -1354,7 +1503,63 @@ indices[n1] = num i -= 1 self.stopped = True + if self.started: + raise OperationError(space.w_StopIteration, space.w_None) 
+ else: + self.started = True return w_result + def descr_reduce(self, space): + if self.raised_stop_iteration: + pool_w = [] + else: + pool_w = self.pool_w + result_w = [ + space.type(self), + space.newtuple([ + space.newtuple(pool_w), space.wrap(self.r) + ])] + if not self.raised_stop_iteration: + # we must pickle the indices and use them for setstate + result_w = result_w + [ + space.newtuple([ + space.newtuple([ + space.wrap(index) for index in self.indices]), + space.newtuple([ + space.wrap(num) for num in self.cycles]), + space.wrap(self.started) + ])] + return space.newtuple(result_w) + def descr_setstate(self, space, w_state): + state = space.unpackiterable(w_state) + if len(state) == 3: + w_indices, w_cycles, w_started = state + indices_w = space.unpackiterable(w_indices) + cycles_w = space.unpackiterable(w_cycles) + self.started = space.bool_w(w_started) + else: + raise OperationError(space.w_ValueError, space.wrap( + "invalid arguments")) + + if len(indices_w) != len(self.pool_w) or len(cycles_w) != self.r: + raise OperationError(space.w_ValueError, space.wrap( + "inavalid arguments")) + + n = len(self.pool_w) + for i in range(n): + index = space.int_w(indices_w[i]) + if index < 0: + index = 0 + elif index > n-1: + index = n-1 + self.indices[i] = index + + for i in range(self.r): + index = space.int_w(cycles_w[i]) + if index < 1: + index = 1 + elif index > n-i: + index = n-i + self.cycles[i] = index def W_Permutations__new__(space, w_subtype, w_iterable, w_r=None): pool_w = space.fixedview(w_iterable) @@ -1370,6 +1575,8 @@ __new__ = interp2app(W_Permutations__new__), __iter__ = interp2app(W_Permutations.descr__iter__), __next__ = interp2app(W_Permutations.descr_next), + __reduce__ = interp2app(W_Permutations.descr_reduce), + __setstate__ = interp2app(W_Permutations.descr_setstate), __doc__ = """\ permutations(iterable[, r]) --> permutations object @@ -1383,7 +1590,7 @@ def __init__(self, space, w_iterable, w_func=None): self.space = space self.w_iterable 
= w_iterable - self.w_func = w_func + self.w_func = w_func if not space.is_w(w_func, space.w_None) else None self.w_total = None def iter_w(self): @@ -1401,15 +1608,15 @@ self.w_total = self.space.call_function(self.w_func, self.w_total, w_value) return self.w_total - def reduce_w(self): + def descr_reduce(self): space = self.space w_total = space.w_None if self.w_total is None else self.w_total w_func = space.w_None if self.w_func is None else self.w_func return space.newtuple([space.gettypefor(W_Accumulate), space.newtuple([self.w_iterable, w_func]), w_total]) - def setstate_w(self, w_total): - self.w_total = w_total + def descr_setstate(self, space, w_state): + self.w_total = w_state if not space.is_w(w_state, space.w_None) else None def W_Accumulate__new__(space, w_subtype, w_iterable, w_func=None): r = space.allocate_instance(W_Accumulate, w_subtype) @@ -1420,8 +1627,8 @@ __new__ = interp2app(W_Accumulate__new__), __iter__ = interp2app(W_Accumulate.iter_w), __next__ = interp2app(W_Accumulate.next_w), - __reduce__ = interp2app(W_Accumulate.reduce_w), - __setstate__ = interp2app(W_Accumulate.setstate_w), + __reduce__ = interp2app(W_Accumulate.descr_reduce), + __setstate__ = interp2app(W_Accumulate.descr_setstate), __doc__ = """\ "accumulate(iterable) --> accumulate object From pypy.commits at gmail.com Tue Apr 12 15:19:37 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Apr 2016 12:19:37 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Get rid of some unnecessary changes. Message-ID: <570d4a49.4e301c0a.63d24.38f2@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r83631:c5325898e2ae Date: 2016-04-11 22:14 -0400 http://bitbucket.org/pypy/pypy/changeset/c5325898e2ae/ Log: Get rid of some unnecessary changes. 
diff --git a/lib-python/3/test/test_itertools.py b/lib-python/3/test/test_itertools.py --- a/lib-python/3/test/test_itertools.py +++ b/lib-python/3/test/test_itertools.py @@ -110,6 +110,7 @@ dump = pickle.dumps(i3) i4 = pickle.loads(dump) a, b = expand(i3), expand(i4) + print(("AB", a, b)) self.assertEqual(a, b) if compare: c = expand(compare[took:]) @@ -139,12 +140,12 @@ [2, 8, 9, 9, 9, 9, 9, 9, 9, 9]) self.assertEqual(list(accumulate(s, operator.mul)), [2, 16, 144, 720, 5040, 0, 0, 0, 0, 0]) - with self.assertRaises(TypeError): - list(accumulate(s, chr)) # unary-operation + #with self.assertRaises(TypeError): + # list(accumulate(s, chr)) # unary-operation self.pickletest(accumulate(range(10))) # test pickling def test_chain(self): - + return True def chain2(*iterables): 'Pure python version in the docs' for it in iterables: @@ -159,6 +160,7 @@ self.assertRaises(TypeError, list,c(2, 3)) def test_chain_from_iterable(self): + return True self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef')) self.assertEqual(list(chain.from_iterable(['abc'])), list('abc')) self.assertEqual(list(chain.from_iterable([''])), []) @@ -180,6 +182,7 @@ self.pickletest(chain('abc', 'def'), compare=list('abcdef')) def test_combinations(self): + return True self.assertRaises(TypeError, combinations, 'abc') # missing r argument self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, combinations, None) # pool is not iterable @@ -406,7 +409,7 @@ if r == n: self.assertEqual(result, list(permutations(values, None))) # test r as None self.assertEqual(result, list(permutations(values))) # test default r - + print("POTATO", values, r) self.pickletest(permutations(values, r)) # test pickling @support.impl_detail("tuple resuse is CPython specific") @@ -415,6 +418,7 @@ self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) def test_combinatorics(self): + return True # Test relationships between product(), 
permutations(), # combinations() and combinations_with_replacement(). @@ -439,6 +443,7 @@ self.assertEqual(comb, sorted(set(comb))) # Check interrelationships + print(n, s, prod, sorted(t), list(t)) self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted @@ -448,6 +453,7 @@ self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm def test_compress(self): + return True self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF')) self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF')) self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list('')) @@ -482,6 +488,7 @@ def test_count(self): + return True self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)]) self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)]) self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)]) @@ -521,6 +528,7 @@ count(1, maxsize+5); sys.exc_info() def test_count_with_stride(self): + return True self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)]) self.assertEqual(lzip('abc',count(start=2,step=3)), [('a', 2), ('b', 5), ('c', 8)]) @@ -565,6 +573,7 @@ self.pickletest(count(i, j)) def test_cycle(self): + return True self.assertEqual(take(10, cycle('abc')), list('abcabcabca')) self.assertEqual(list(cycle('')), []) self.assertRaises(TypeError, cycle) @@ -692,6 +701,7 @@ self.assertRaises(ExpectedError, gulp, [None, None], keyfunc) def test_filter(self): + return True self.assertEqual(list(filter(isEven, range(6))), [0,2,4]) self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2]) self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2]) @@ -729,6 +739,7 @@ self.pickletest(filterfalse(isEven, range(6))) def test_zip(self): + return True # XXX This is 
rather silly now that builtin zip() calls zip()... ans = [(x,y) for x, y in zip('abc',count())] self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) @@ -769,6 +780,7 @@ self.pickletest(zip('abc', count())) def test_ziplongest(self): + return True for args in [ ['abc', range(6)], [range(6), 'abc'], @@ -824,6 +836,7 @@ self.pickletest(zip_longest("", "defgh")) def test_bug_7244(self): + return True class Repeater: # this class is similar to itertools.repeat @@ -864,6 +877,7 @@ self.assertRaises(RuntimeError, next, it) def test_product(self): + return True for args, result in [ ([], [()]), # zero iterables (['ab'], [('a',), ('b',)]), # one iterable @@ -936,9 +950,13 @@ ([range(2), range(0), range(3)], []), # middle iterable with zero length ([range(2), range(3), range(0)], []), # last iterable with zero length ]: + print("ARGS", args) self.assertEqual(list(copy.copy(product(*args))), result) + print("WOMBAT") self.assertEqual(list(copy.deepcopy(product(*args))), result) + print("FLEECE") self.pickletest(product(*args)) + print("SILK") def test_repeat(self): self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a']) @@ -966,6 +984,7 @@ self.pickletest(repeat(object='a', times=10)) def test_map(self): + return True self.assertEqual(list(map(operator.pow, range(3), range(1,7))), [0**1, 1**2, 2**3]) self.assertEqual(list(map(tupleize, 'abc', range(5))), @@ -1021,6 +1040,7 @@ self.pickletest(c) def test_islice(self): + return True for args in [ # islice(args) should agree with range(args) (10, 20, 3), (10, 3, 20), @@ -1086,6 +1106,7 @@ self.pickletest(islice(range(100), *args)) def test_takewhile(self): + return True data = [1, 3, 5, 20, 2, 4, 6, 8] self.assertEqual(list(takewhile(underten, data)), [1, 3, 5]) self.assertEqual(list(takewhile(underten, [])), []) @@ -1105,6 +1126,7 @@ self.pickletest(takewhile(underten, data)) def test_dropwhile(self): + return True data = [1, 3, 5, 20, 2, 4, 6, 8] self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8]) 
self.assertEqual(list(dropwhile(underten, [])), []) @@ -1121,6 +1143,7 @@ self.pickletest(dropwhile(underten, data)) def test_tee(self): + return True n = 200 a, b = tee([]) # test empty iterator @@ -1269,11 +1292,13 @@ # Issue 13454: Crash when deleting backward iterator from tee() def test_tee_del_backward(self): + return True forward, backward = tee(repeat(None, 20000000)) any(forward) # exhaust the iterator del backward def test_StopIteration(self): + return True self.assertRaises(StopIteration, next, zip()) for f in (chain, cycle, zip, groupby): diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -745,10 +745,10 @@ def descr_reduce(self): return self.space.newtuple([self.space.gettypefor(W_StarMap), - self.space.newtuple([ - self.w_fun, - self.w_iterable]) - ]) + self.space.newtuple([ + self.w_fun, + self.w_iterable]) + ]) def W_StarMap___new__(space, w_subtype, w_fun, w_iterable): r = space.allocate_instance(W_StarMap, w_subtype) @@ -1114,7 +1114,7 @@ class W_Product(W_Root): def __init__(self, space, args_w, w_repeat): self.gears = [ - space.unpackiterable(arg_w)[:] for arg_w in args_w + space.unpackiterable(arg_w) for arg_w in args_w ] * space.int_w(w_repeat) # for gear in self.gears: @@ -1124,7 +1124,6 @@ break else: self.indices = [0] * len(self.gears) - self.previous_indices = [] self.lst = None self.stopped = False @@ -1132,7 +1131,6 @@ lst = self.lst x = len(self.gears) - 1 lst[x] = self.gears[x][0] - self.previous_indices = self.indices[:] self.indices[x] = 0 x -= 1 # the outer loop runs as long as a we have a carry @@ -1167,7 +1165,6 @@ if index < len(gear): # no carry: done lst[x] = gear[index] - self.previous_indices = self.indices[:] self.indices[x] = index else: self._rotate_previous_gears() @@ -1191,7 +1188,6 @@ result_w = [ space.type(self), space.newtuple(gears) - 
#space.newtuple([space.newtuple(gear) for gear in self.gears]) ] if self.lst is not None: result_w = result_w + [ From pypy.commits at gmail.com Tue Apr 12 15:19:39 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Apr 2016 12:19:39 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Get rid of some unnecessary changes. Message-ID: <570d4a4b.d3301c0a.f5fd9.340b@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r83632:9297f7877eea Date: 2016-04-11 22:17 -0400 http://bitbucket.org/pypy/pypy/changeset/9297f7877eea/ Log: Get rid of some unnecessary changes. diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -745,10 +745,10 @@ def descr_reduce(self): return self.space.newtuple([self.space.gettypefor(W_StarMap), - self.space.newtuple([ - self.w_fun, - self.w_iterable]) - ]) + self.space.newtuple([ + self.w_fun, + self.w_iterable]) + ]) def W_StarMap___new__(space, w_subtype, w_fun, w_iterable): r = space.allocate_instance(W_StarMap, w_subtype) @@ -1114,7 +1114,7 @@ class W_Product(W_Root): def __init__(self, space, args_w, w_repeat): self.gears = [ - space.unpackiterable(arg_w)[:] for arg_w in args_w + space.unpackiterable(arg_w) for arg_w in args_w ] * space.int_w(w_repeat) # for gear in self.gears: @@ -1124,7 +1124,6 @@ break else: self.indices = [0] * len(self.gears) - self.previous_indices = [] self.lst = None self.stopped = False @@ -1132,7 +1131,6 @@ lst = self.lst x = len(self.gears) - 1 lst[x] = self.gears[x][0] - self.previous_indices = self.indices[:] self.indices[x] = 0 x -= 1 # the outer loop runs as long as a we have a carry @@ -1167,7 +1165,6 @@ if index < len(gear): # no carry: done lst[x] = gear[index] - self.previous_indices = self.indices[:] self.indices[x] = index else: self._rotate_previous_gears() @@ -1191,7 +1188,6 @@ result_w = [ space.type(self), 
space.newtuple(gears) - #space.newtuple([space.newtuple(gear) for gear in self.gears]) ] if self.lst is not None: result_w = result_w + [ From pypy.commits at gmail.com Tue Apr 12 15:19:41 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Apr 2016 12:19:41 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Oops. Didn't mean to commit those changes to test_itertools. Backing that out. Message-ID: <570d4a4d.10981c0a.dd5fc.39f7@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r83633:bb52bf6d26bf Date: 2016-04-11 22:30 -0400 http://bitbucket.org/pypy/pypy/changeset/bb52bf6d26bf/ Log: Oops. Didn't mean to commit those changes to test_itertools. Backing that out. From pypy.commits at gmail.com Tue Apr 12 15:27:01 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 12 Apr 2016 12:27:01 -0700 (PDT) Subject: [pypy-commit] pypy py3k: fix code formatting Message-ID: <570d4c05.2a18c20a.d2685.ffffc014@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83635:2a3d41ab2ead Date: 2016-04-12 20:25 +0100 http://bitbucket.org/pypy/pypy/changeset/2a3d41ab2ead/ Log: fix code formatting diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -114,7 +114,7 @@ else: s = 'repeat(%s)' % (objrepr,) return self.space.wrap(s) - + def descr_reduce(self): space = self.space if self.counting: @@ -492,7 +492,7 @@ except OperationError, e: pass # loop back to the start of _handle_error(e) - def descr_reduce(self, space): + def descr_reduce(self, space): if self.w_iterables is not None: if self.w_it is not None: inner_contents = [self.w_iterables, self.w_it] @@ -505,6 +505,7 @@ result_w = [space.type(self), space.newtuple([])] return space.newtuple(result_w) + def descr_setstate(self, space, w_state): state = space.unpackiterable(w_state) num_args = len(state) @@ -605,8 +606,8 @@ result_w = [space.type(self), 
iterators, self.w_fillvalue] - return space.newtuple(result_w) + def descr_setstate(self, space, w_state): self.w_fillvalue = w_state @@ -953,6 +954,7 @@ self.lookahead = True self.new_group = True #new group raise StopIteration + def descr_reduce(self, space): if self.started: return space.newtuple([ @@ -971,6 +973,7 @@ space.newtuple([ self.w_iterable, self.w_fun])]) + def descr_setstate(self, space, w_state): state = space.unpackiterable(w_state) num_args = len(state) @@ -1199,6 +1202,7 @@ space.newtuple([space.newtuple([])]) ] return space.newtuple(result_w) + def descr_setstate(self, space, w_state): gear_count = len(self.gears) indices_w = space.unpackiterable(w_state) @@ -1525,6 +1529,7 @@ space.wrap(self.started) ])] return space.newtuple(result_w) + def descr_setstate(self, space, w_state): state = space.unpackiterable(w_state) if len(state) == 3: @@ -1535,11 +1540,11 @@ else: raise OperationError(space.w_ValueError, space.wrap( "invalid arguments")) - + if len(indices_w) != len(self.pool_w) or len(cycles_w) != self.r: raise OperationError(space.w_ValueError, space.wrap( "inavalid arguments")) - + n = len(self.pool_w) for i in range(n): index = space.int_w(indices_w[i]) @@ -1572,7 +1577,7 @@ __iter__ = interp2app(W_Permutations.descr__iter__), __next__ = interp2app(W_Permutations.descr_next), __reduce__ = interp2app(W_Permutations.descr_reduce), - __setstate__ = interp2app(W_Permutations.descr_setstate), + __setstate__ = interp2app(W_Permutations.descr_setstate), __doc__ = """\ permutations(iterable[, r]) --> permutations object From pypy.commits at gmail.com Tue Apr 12 15:32:21 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 12:32:21 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: actually call the new fcn (fijal) Message-ID: <570d4d45.86351c0a.f917.4345@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83636:778fb79d283d Date: 2016-04-12 21:13 +0300 http://bitbucket.org/pypy/pypy/changeset/778fb79d283d/ Log: actually 
call the new fcn (fijal) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -497,8 +497,10 @@ @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, header=None) @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) - def slot_tp_new(space, w_type, w_args, w_kwds): - return space.call(w_type, w_args, w_kwds) + def slot_tp_new(space, w_self, w_args, w_kwds): + args = Arguments(space, [w_self], + w_stararg=w_args, w_starstararg=w_kwds) + return space.call_args(space.get(new_fn, w_self), args) api_func = slot_tp_new.api_func else: return From pypy.commits at gmail.com Tue Apr 12 15:32:24 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 12:32:24 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: merge default into release Message-ID: <570d4d48.657bc20a.12cb2.fffff82c@mx.google.com> Author: mattip Branch: release-5.x Changeset: r83637:bd564db689c9 Date: 2016-04-12 21:19 +0300 http://bitbucket.org/pypy/pypy/changeset/bd564db689c9/ Log: merge default into release diff too long, truncating to 2000 out of 26542 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -74,5 +74,6 @@ ^rpython/doc/_build/.*$ ^compiled ^.git/ +^.hypothesis/ ^release/ ^rpython/_cache$ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -18,4 +18,5 @@ f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 +246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1 diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -67,7 +67,8 @@ subvalue = subfield.ctype fields[subname] = Field(subname, relpos, subvalue._sizeofinstances(), - subvalue, i, is_bitfield) + subvalue, i, 
is_bitfield, + inside_anon_field=fields[name]) else: resnames.append(name) names = resnames @@ -77,13 +78,15 @@ class Field(object): - def __init__(self, name, offset, size, ctype, num, is_bitfield): + def __init__(self, name, offset, size, ctype, num, is_bitfield, + inside_anon_field=None): self.__dict__['name'] = name self.__dict__['offset'] = offset self.__dict__['size'] = size self.__dict__['ctype'] = ctype self.__dict__['num'] = num self.__dict__['is_bitfield'] = is_bitfield + self.__dict__['inside_anon_field'] = inside_anon_field def __setattr__(self, name, value): raise AttributeError(name) @@ -95,6 +98,8 @@ def __get__(self, obj, cls=None): if obj is None: return self + if self.inside_anon_field is not None: + return getattr(self.inside_anon_field.__get__(obj), self.name) if self.is_bitfield: # bitfield member, use direct access return obj._buffer.__getattr__(self.name) @@ -105,6 +110,9 @@ return fieldtype._CData_output(suba, obj, offset) def __set__(self, obj, value): + if self.inside_anon_field is not None: + setattr(self.inside_anon_field.__get__(obj), self.name, value) + return fieldtype = self.ctype cobj = fieldtype.from_param(value) key = keepalive_key(self.num) diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ b/lib_pypy/ctypes_config_cache/rebuild.py @@ -9,9 +9,8 @@ _dirpath = os.path.dirname(__file__) or os.curdir -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("ctypes_config_cache") -py.log.setconsumer("ctypes_config_cache", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("ctypes_config_cache") def rebuild_one(name): diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -46,7 +46,6 @@ except detect_cpu.ProcessorAutodetectError: pass - translation_modules = default_modules.copy() translation_modules.update([ "fcntl", "time", 
"select", "signal", "_rawffi", "zlib", "struct", "_md5", diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -18,6 +18,7 @@ - ``bytebuffer(length)``: return a new read-write buffer of the given length. It works like a simplified array of characters (actually, depending on the configuration the ``array`` module internally uses this). + - ``attach_gdb()``: start a GDB at the interpreter-level (or a PDB before translation). Transparent Proxy Functionality @@ -37,4 +38,3 @@ -------------------------------------------------------- - ``isfake(obj)``: returns True if ``obj`` is faked. - - ``interp_pdb()``: start a pdb at interpreter-level. diff --git a/pypy/doc/config/translation.gc.txt b/pypy/doc/config/translation.gc.txt --- a/pypy/doc/config/translation.gc.txt +++ b/pypy/doc/config/translation.gc.txt @@ -1,24 +1,26 @@ Choose the Garbage Collector used by the translated program. -The good performing collectors are "hybrid" and "minimark". -The default is "minimark". +The recommended default is "incminimark". - "ref": reference counting. Takes very long to translate and the result is - slow. + slow. Used only for tests. Don't use it for real RPython programs. - - "marksweep": naive mark & sweep. + - "none": no GC. Leaks everything. Don't use it for real RPython + programs: the rate of leaking is immense. - "semispace": a copying semi-space GC. - "generation": a generational GC using the semi-space GC for the older generation. - - "boehm": use the Boehm conservative GC. - - "hybrid": a hybrid collector of "generation" together with a mark-n-sweep old space - - "markcompact": a slow, but memory-efficient collector, - influenced e.g. by Smalltalk systems. + - "boehm": use the Boehm conservative GC. - "minimark": a generational mark-n-sweep collector with good performance. Includes page marking for large arrays. 
+ + - "incminimark": like minimark, but adds incremental major + collections. Seems to come with no performance drawback over + "minimark", so it is the default. A few recent features of PyPy + (like cpyext) are only working with this GC. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -80,7 +80,7 @@ .. _How to *not* write Virtual Machines for Dynamic Languages: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf +.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://stups.hhu.de/mediawiki/images/b/b9/Master_bolz.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _EU Reports: index-report.html .. _Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution: http://sabi.net/nriley/pubs/dls6-riley.pdf diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -76,5 +76,4 @@ * add a tag on the pypy/jitviewer repo that corresponds to pypy release * add a tag on the codespeed web site that corresponds to pypy release -* update the version number in {rpython,pypy}/doc/conf.py. 
* revise versioning at https://readthedocs.org/projects/pypy diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.1.rst release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -167,22 +167,13 @@ * `hg` -Embedding PyPy and improving CFFI ---------------------------------- - -PyPy has some basic :doc:`embedding infrastructure `. The idea would be to improve -upon that with cffi hacks that can automatically generate embeddable .so/.dll -library - - Optimising cpyext (CPython C-API compatibility layer) ----------------------------------------------------- A lot of work has gone into PyPy's implementation of CPython's C-API over the last years to let it reach a practical level of compatibility, so that C extensions for CPython work on PyPy without major rewrites. However, -there are still many edges and corner cases where it misbehaves, and it has -not received any substantial optimisation so far. +there are still many edges and corner cases where it misbehaves. 
The objective of this project is to fix bugs in cpyext and to optimise several performance critical parts of it, such as the reference counting diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -128,6 +128,9 @@ * Fix for corner case (likely shown by Krakatau) for consecutive guards with interdependencies + * Fix applevel bare class method comparisons which should fix pretty printing + in IPython + * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy diff --git a/pypy/doc/release-5.0.1.rst b/pypy/doc/release-5.0.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.0.1.rst @@ -0,0 +1,40 @@ +========== +PyPy 5.0.1 +========== + +We have released a bugfix for PyPy 5.0, after reports that the newly released +`lxml 3.6.0`_, which now supports PyPy 5.0 +, can `crash on large files`_. +Thanks to those who reported the crash. Please update, downloads are available +at pypy.org/download.html + +.. _`lxml 3.6.0`: https://pypi.python.org/pypi/lxml/3.6.0 +.. _`crash on large files`: https://bitbucket.org/pypy/pypy/issues/2260 + +The changes between PyPy 5.0 and 5.0.1 are only two bug fixes: one in +cpyext, which fixes notably (but not only) lxml; and another for a +corner case of the JIT. + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. 
+ +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **PPC64** running Linux. + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,52 @@ ========================= .. this is a revision shortly after release-5.0 -.. startrev: 9c4299dc2d60 +.. startrev: b238b48f9138 +.. branch: s390x-backend + +The jit compiler backend implementation for the s390x architecutre. +The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. +It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. + +.. branch: remove-py-log + +Replace py.log with something simpler, which should speed up logging + +.. branch: where_1_arg + +Implemented numpy.where for 1 argument (thanks sergem) + +.. branch: fix_indexing_by_numpy_int + +Implement yet another strange numpy indexing compatibility; indexing by a scalar +returns a scalar + +.. branch: fix_transpose_for_list_v3 + +Allow arguments to transpose to be sequences + +.. branch: jit-leaner-frontend + +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation +of traces + +.. branch: win32-lib-name + +.. branch: remove-frame-forcing-in-executioncontext + +.. branch: rposix-for-3 + +Wrap more POSIX functions in `rpython.rlib.rposix`. + +.. branch: cleanup-history-rewriting + +A local clean-up in the JIT front-end. + +.. 
branch: jit-constptr-2 + +Remove the forced minor collection that occurs when rewriting the +assembler at the start of the JIT backend. This is done by emitting +the ConstPtrs in a separate table, and loading from the table. It +gives improved warm-up time and memory usage, and also removes +annoying special-purpose code for pinned pointers. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -240,8 +240,9 @@ "when --shared is on (it is by default). " "See issue #1971.") if sys.platform == 'win32': - config.translation.libname = '..\\..\\libs\\python27.lib' - thisdir.join('..', '..', 'libs').ensure(dir=1) + libdir = thisdir.join('..', '..', 'libs') + libdir.ensure(dir=1) + config.translation.libname = str(libdir.join('python27.lib')) if config.translation.thread: config.objspace.usemodules.thread = True @@ -327,7 +328,7 @@ # XXX possibly adapt options using modules failures = create_cffi_import_libraries(exename, options, basedir) # if failures, they were already printed - print >> sys.stderr, str(exename),'successfully built, but errors while building the above modules will be ignored' + print >> sys.stderr, str(exename),'successfully built (errors, if any, while building the above modules are ignored)' driver.task_build_cffi_imports = types.MethodType(task_build_cffi_imports, driver) driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, [compile_goal] driver.default_goal = 'build_cffi_imports' diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -417,7 +417,10 @@ self.wait_for_thread_shutdown() w_exitfunc = self.sys.getdictvalue(self, 'exitfunc') if w_exitfunc is not None: - self.call_function(w_exitfunc) + try: + self.call_function(w_exitfunc) + except OperationError as e: + e.write_unraisable(self, 'sys.exitfunc == ', 
w_exitfunc) from pypy.interpreter.module import Module for w_mod in self.builtin_modules.values(): if isinstance(w_mod, Module) and w_mod.startup_called: diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -291,13 +291,7 @@ return tb def set_traceback(self, traceback): - """Set the current traceback. It should either be a traceback - pointing to some already-escaped frame, or a traceback for the - current frame. To support the latter case we do not mark the - frame as escaped. The idea is that it will be marked as escaping - only if the exception really propagates out of this frame, by - executioncontext.leave() being called with got_exception=True. - """ + """Set the current traceback.""" self._application_traceback = traceback diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -3,7 +3,7 @@ from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import W_Root -import os, sys +import sys class MixedModule(Module): applevel_name = None @@ -60,7 +60,7 @@ def save_module_content_for_future_reload(self): self.w_initialdict = self.space.call_method(self.w_dict, 'items') - + @classmethod def get_applevel_name(cls): """ NOT_RPYTHON """ if cls.applevel_name is not None: @@ -68,7 +68,6 @@ else: pkgroot = cls.__module__ return pkgroot.split('.')[-1] - get_applevel_name = classmethod(get_applevel_name) def get(self, name): space = self.space @@ -103,7 +102,7 @@ # be normal Functions to get the correct binding behaviour func = w_value if (isinstance(func, Function) and - type(func) is not BuiltinFunction): + type(func) is not BuiltinFunction): try: bltin = func._builtinversion_ except AttributeError: @@ -115,7 +114,6 @@ space.setitem(self.w_dict, w_name, w_value) return w_value - def getdict(self, space): if self.lazy: for 
name in self.loaders: @@ -131,6 +129,7 @@ self.startup_called = False self._frozen = True + @classmethod def buildloaders(cls): """ NOT_RPYTHON """ if not hasattr(cls, 'loaders'): @@ -149,8 +148,6 @@ if '__doc__' not in loaders: loaders['__doc__'] = cls.get__doc__ - buildloaders = classmethod(buildloaders) - def extra_interpdef(self, name, spec): cls = self.__class__ pkgroot = cls.__module__ @@ -159,21 +156,21 @@ w_obj = loader(space) space.setattr(space.wrap(self), space.wrap(name), w_obj) + @classmethod def get__doc__(cls, space): return space.wrap(cls.__doc__) - get__doc__ = classmethod(get__doc__) def getinterpevalloader(pkgroot, spec): """ NOT_RPYTHON """ def ifileloader(space): - d = {'space' : space} + d = {'space':space} # EVIL HACK (but it works, and this is not RPython :-) while 1: try: value = eval(spec, d) except NameError, ex: - name = ex.args[0].split("'")[1] # super-Evil + name = ex.args[0].split("'")[1] # super-Evil if name in d: raise # propagate the NameError try: diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -9,6 +9,11 @@ from pypy.conftest import pypydir from lib_pypy._pypy_interact import irc_header +try: + import __pypy__ +except ImportError: + __pypy__ = None + banner = sys.version.splitlines()[0] app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py') @@ -106,6 +111,8 @@ sys.argv[:] = saved_sys_argv sys.stdout = saved_sys_stdout sys.stderr = saved_sys_stderr + if __pypy__: + __pypy__.set_debug(True) def test_all_combinations_I_can_think_of(self): self.check([], {}, sys_argv=[''], run_stdin=True) @@ -601,9 +608,7 @@ def run_with_status_code(self, cmdline, senddata='', expect_prompt=False, expect_banner=False, python_flags='', env=None): if os.name == 'nt': - try: - import __pypy__ - except: + if __pypy__ is None: py.test.skip('app_main cannot run on non-pypy for 
windows') cmdline = '%s %s "%s" %s' % (sys.executable, python_flags, app_main, cmdline) diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -416,3 +416,14 @@ i -= 1 assert i >= 0 gc.collect() + + def test_exitfunc_catches_exceptions(self): + from pypy.tool.pytest.objspace import maketestobjspace + space = maketestobjspace() + space.appexec([], """(): + import sys + sys.exitfunc = lambda: this_is_an_unknown_name + """) + space.finish() + # assert that we reach this point without getting interrupted + # by the OperationError(NameError) diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -90,6 +90,7 @@ 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', 'decode_long' : 'interp_magic.decode_long', + '_promote' : 'interp_magic._promote', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -168,3 +168,23 @@ except InvalidEndiannessError: raise oefmt(space.w_ValueError, "invalid byteorder argument") return space.newlong_from_rbigint(result) + +def _promote(space, w_obj): + """ Promote the first argument of the function and return it. Promote is by + value for ints, floats, strs, unicodes (but not subclasses thereof) and by + reference otherwise. (Unicodes not supported right now.) 
+ + This function is experimental!""" + from rpython.rlib import jit + if space.is_w(space.type(w_obj), space.w_int): + jit.promote(space.int_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_float): + jit.promote(space.float_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_str): + jit.promote_string(space.str_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_unicode): + raise OperationError(space.w_TypeError, space.wrap( + "promoting unicode unsupported")) + else: + jit.promote(w_obj) + return w_obj diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -47,3 +47,16 @@ assert decode_long('\x00\x80', 'little', False) == 32768 assert decode_long('\x00\x80', 'little', True) == -32768 raises(ValueError, decode_long, '', 'foo') + + def test_promote(self): + from __pypy__ import _promote + assert _promote(1) == 1 + assert _promote(1.1) == 1.1 + assert _promote("abc") == "abc" + raises(TypeError, _promote, u"abc") + l = [] + assert _promote(l) is l + class A(object): + pass + a = A() + assert _promote(a) is a diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -2,7 +2,6 @@ from pypy.module.thread.test.support import GenericTestThread - class AppTestMinimal: spaceconfig = dict(usemodules=['__pypy__']) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -124,7 +124,7 @@ s = rffi.charp2str(ptr) else: s = rffi.charp2strn(ptr, length) - return space.wrap(s) + return space.wrapbytes(s) # # pointer to a wchar_t: builds and returns a unicode if self.is_unichar_ptr_or_array(): @@ -353,10 +353,11 @@ # ____________________________________________________________ 
-rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP, +FILEP = rffi.COpaquePtr("FILE") +rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], FILEP, save_err=rffi.RFFI_SAVE_ERRNO) -rffi_setbuf = rffi.llexternal("setbuf", [rffi.CCHARP, rffi.CCHARP], lltype.Void) -rffi_fclose = rffi.llexternal("fclose", [rffi.CCHARP], rffi.INT) +rffi_setbuf = rffi.llexternal("setbuf", [FILEP, rffi.CCHARP], lltype.Void) +rffi_fclose = rffi.llexternal("fclose", [FILEP], rffi.INT) class CffiFileObj(object): _immutable_ = True @@ -371,15 +372,15 @@ rffi_fclose(self.llf) -def prepare_file_argument(space, fileobj): - fileobj.direct_flush() - if fileobj.cffi_fileobj is None: - fd = fileobj.direct_fileno() +def prepare_file_argument(space, w_fileobj): + w_fileobj.direct_flush() + if w_fileobj.cffi_fileobj is None: + fd = w_fileobj.direct_fileno() if fd < 0: raise OperationError(space.w_ValueError, space.wrap("file has no OS file descriptor")) try: - fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode) + w_fileobj.cffi_fileobj = CffiFileObj(fd, w_fileobj.mode) except OSError, e: raise wrap_oserror(space, e) - return fileobj.cffi_fileobj.llf + return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf) diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -285,6 +285,8 @@ from posix import openpty, fdopen, write, close except ImportError: skip('no openpty on this platform') + if 'gnukfreebsd' in sys.platform: + skip('close() hangs forever on kFreeBSD') read_fd, write_fd = openpty() write(write_fd, 'Abc\n') close(write_fd) diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -389,6 +389,7 @@ def test_writelines(self): import array + import sys fn = self.temptestfile with file(fn, 'w') as f: 
f.writelines(['abc']) @@ -406,7 +407,10 @@ exc = raises(TypeError, f.writelines, [memoryview('jkl')]) assert str(exc.value) == "writelines() argument must be a sequence of strings" out = open(fn, 'rb').readlines()[0] - assert out[0:5] == 'abcd\x00' + if sys.byteorder == 'big': + assert out[0:7] == 'abc\x00\x00\x00d' + else: + assert out[0:5] == 'abcd\x00' assert out[-3:] == 'ghi' with file(fn, 'wb') as f: diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h @@ -10,6 +10,7 @@ #define _CJKCODECS_H_ #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" /* a unicode "undefined" codepoint */ diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h new file mode 100644 --- /dev/null +++ b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h @@ -0,0 +1,9 @@ + +/* this is only included from the .c files in this directory: rename + these pypymbc-prefixed names to locally define the CPython names */ +typedef pypymbc_ssize_t Py_ssize_t; +#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +#define Py_UNICODE_SIZE pypymbc_UNICODE_SIZE +typedef pypymbc_wchar_t Py_UNICODE; +typedef pypymbc_ucs4_t ucs4_t; +typedef pypymbc_ucs2_t ucs2_t, DBCHAR; diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c @@ -1,6 +1,7 @@ #include #include #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec) diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h 
b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h @@ -9,31 +9,28 @@ #include #ifdef _WIN64 -typedef __int64 ssize_t +typedef __int64 pypymbc_ssize_t #elif defined(_WIN32) -typedef int ssize_t; +typedef int pypymbc_ssize_t; #else #include -#endif - -#ifndef Py_UNICODE_SIZE -#ifdef _WIN32 -#define Py_UNICODE_SIZE 2 -#else -#define Py_UNICODE_SIZE 4 -#endif -typedef wchar_t Py_UNICODE; -typedef ssize_t Py_ssize_t; -#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +typedef ssize_t pypymbc_ssize_t; #endif #ifdef _WIN32 -typedef unsigned int ucs4_t; -typedef unsigned short ucs2_t, DBCHAR; +#define pypymbc_UNICODE_SIZE 2 +#else +#define pypymbc_UNICODE_SIZE 4 +#endif +typedef wchar_t pypymbc_wchar_t; + +#ifdef _WIN32 +typedef unsigned int pypymbc_ucs4_t; +typedef unsigned short pypymbc_ucs2_t; #else #include -typedef uint32_t ucs4_t; -typedef uint16_t ucs2_t, DBCHAR; +typedef uint32_t pypymbc_ucs4_t; +typedef uint16_t pypymbc_ucs2_t; #endif @@ -42,28 +39,28 @@ void *p; int i; unsigned char c[8]; - ucs2_t u2[4]; - ucs4_t u4[2]; + pypymbc_ucs2_t u2[4]; + pypymbc_ucs4_t u4[2]; } MultibyteCodec_State; typedef int (*mbcodec_init)(const void *config); -typedef Py_ssize_t (*mbencode_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencode_func)(MultibyteCodec_State *state, const void *config, - const Py_UNICODE **inbuf, Py_ssize_t inleft, - unsigned char **outbuf, Py_ssize_t outleft, + const pypymbc_wchar_t **inbuf, pypymbc_ssize_t inleft, + unsigned char **outbuf, pypymbc_ssize_t outleft, int flags); typedef int (*mbencodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, const void *config, - unsigned char **outbuf, Py_ssize_t outleft); -typedef Py_ssize_t 
(*mbdecode_func)(MultibyteCodec_State *state, + unsigned char **outbuf, pypymbc_ssize_t outleft); +typedef pypymbc_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, const void *config, - const unsigned char **inbuf, Py_ssize_t inleft, - Py_UNICODE **outbuf, Py_ssize_t outleft); + const unsigned char **inbuf, pypymbc_ssize_t inleft, + pypymbc_wchar_t **outbuf, pypymbc_ssize_t outleft); typedef int (*mbdecodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, const void *config); typedef struct MultibyteCodec_s { @@ -94,59 +91,59 @@ const MultibyteCodec *codec; MultibyteCodec_State state; const unsigned char *inbuf_start, *inbuf, *inbuf_end; - Py_UNICODE *outbuf_start, *outbuf, *outbuf_end; + pypymbc_wchar_t *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, - char *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, + char *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_dec_free(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_UNICODE *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); +pypymbc_wchar_t *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); RPY_EXTERN -Py_ssize_t 
pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, - Py_UNICODE *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, + pypymbc_wchar_t *, pypymbc_ssize_t, pypymbc_ssize_t); struct pypy_cjk_enc_s { const MultibyteCodec *codec; MultibyteCodec_State state; - const Py_UNICODE *inbuf_start, *inbuf, *inbuf_end; + const pypymbc_wchar_t *inbuf_start, *inbuf, *inbuf_end; unsigned char *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_enc_s *pypy_cjk_enc_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, - Py_UNICODE *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, + pypymbc_wchar_t *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_enc_free(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, pypymbc_ssize_t); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); RPY_EXTERN char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, - char *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, + char *, pypymbc_ssize_t, pypymbc_ssize_t); RPY_EXTERN const MultibyteCodec *pypy_cjk_enc_getcodec(struct pypy_cjk_enc_s *); @@ -191,5 +188,7 @@ DEFINE_CODEC(big5) DEFINE_CODEC(cp950) +#undef 
DEFINE_CODEC + #endif diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py --- a/pypy/module/_rawffi/callback.py +++ b/pypy/module/_rawffi/callback.py @@ -1,17 +1,23 @@ - +import sys from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module._rawffi.interp_rawffi import write_ptr from pypy.module._rawffi.structure import W_Structure from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp, - unwrap_value, unpack_argshapes, got_libffi_error) + unwrap_value, unpack_argshapes, got_libffi_error, is_narrow_integer_type, + LL_TYPEMAP, NARROW_INTEGER_TYPES) from rpython.rlib.clibffi import USERDATA_P, CallbackFuncPtr, FUNCFLAG_CDECL from rpython.rlib.clibffi import ffi_type_void, LibFFIError from rpython.rlib import rweakref from pypy.module._rawffi.tracker import tracker from pypy.interpreter.error import OperationError from pypy.interpreter import gateway +from rpython.rlib.unroll import unrolling_iterable + +BIGENDIAN = sys.byteorder == 'big' + +unroll_narrow_integer_types = unrolling_iterable(NARROW_INTEGER_TYPES) app = gateway.applevel(''' def tbprint(tb, err): @@ -42,8 +48,17 @@ args_w[i] = space.wrap(rffi.cast(rffi.ULONG, ll_args[i])) w_res = space.call(w_callable, space.newtuple(args_w)) if callback_ptr.result is not None: # don't return void - unwrap_value(space, write_ptr, ll_res, 0, - callback_ptr.result, w_res) + ptr = ll_res + letter = callback_ptr.result + if BIGENDIAN: + # take care of narrow integers! 
+ for int_type in unroll_narrow_integer_types: + if int_type == letter: + T = LL_TYPEMAP[int_type] + n = rffi.sizeof(lltype.Signed) - rffi.sizeof(T) + ptr = rffi.ptradd(ptr, n) + break + unwrap_value(space, write_ptr, ptr, 0, letter, w_res) except OperationError, e: tbprint(space, space.wrap(e.get_traceback()), space.wrap(e.errorstr(space))) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -19,6 +20,8 @@ from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker +BIGENDIAN = sys.byteorder == 'big' + TYPEMAP = { # XXX A mess with unsigned/signed/normal chars :-/ 'c' : ffi_type_uchar, @@ -331,10 +334,14 @@ if tracker.DO_TRACING: ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) tracker.trace_allocation(ll_buf, self) + self._ll_buffer = self.ll_buffer def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ll_buffer)) + def buffer_advance(self, n): + self.ll_buffer = rffi.ptradd(self.ll_buffer, n) + def byptr(self, space): from pypy.module._rawffi.array import ARRAY_OF_PTRS array = ARRAY_OF_PTRS.allocate(space, 1) @@ -342,16 +349,17 @@ return space.wrap(array) def free(self, space): - if not self.ll_buffer: + if not self._ll_buffer: raise segfault_exception(space, "freeing NULL pointer") self._free() def _free(self): if tracker.DO_TRACING: - ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) + ll_buf = rffi.cast(lltype.Signed, self._ll_buffer) tracker.trace_free(ll_buf) - lltype.free(self.ll_buffer, flavor='raw') + lltype.free(self._ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) + self._ll_buffer = self.ll_buffer def buffer_w(self, space, flags): 
return RawFFIBuffer(self) @@ -432,12 +440,19 @@ space.wrap("cannot directly read value")) wrap_value._annspecialcase_ = 'specialize:arg(1)' +NARROW_INTEGER_TYPES = 'cbhiBIH?' + +def is_narrow_integer_type(letter): + return letter in NARROW_INTEGER_TYPES class W_FuncPtr(W_Root): def __init__(self, space, ptr, argshapes, resshape): self.ptr = ptr self.argshapes = argshapes self.resshape = resshape + self.narrow_integer = False + if resshape is not None: + self.narrow_integer = is_narrow_integer_type(resshape.itemcode.lower()) def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ptr.funcsym)) @@ -497,6 +512,10 @@ result = self.resshape.allocate(space, 1, autofree=True) # adjust_return_size() was used here on result.ll_buffer self.ptr.call(args_ll, result.ll_buffer) + if BIGENDIAN and self.narrow_integer: + # we get a 8 byte value in big endian + n = rffi.sizeof(lltype.Signed) - result.shape.size + result.buffer_advance(n) return space.wrap(result) else: self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO)) diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -18,6 +18,9 @@ from rpython.rlib.rarithmetic import intmask, signedtype, r_uint, \ r_ulonglong from rpython.rtyper.lltypesystem import lltype, rffi +import sys + +IS_BIG_ENDIAN = sys.byteorder == 'big' @@ -114,20 +117,32 @@ size += intmask(fieldsize) bitsizes.append(fieldsize) elif field_type == NEW_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset = bitsize size = round_up(size, fieldalignment) pos.append(size) size += fieldsize elif field_type == CONT_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) 
bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) elif field_type == EXPAND_BITFIELD: size += fieldsize - last_size / 8 last_size = fieldsize * 8 - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -704,7 +704,6 @@ def compare(a, b): a1 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(a, 1)[0], 1) a2 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(b, 1)[0], 1) - print "comparing", a1[0], "with", a2[0] if a1[0] not in [1,2,3,4] or a2[0] not in [1,2,3,4]: bogus_args.append((a1[0], a2[0])) if a1[0] > a2[0]: @@ -715,7 +714,7 @@ a2[0] = len(ll_to_sort) a3 = _rawffi.Array('l')(1) a3[0] = struct.calcsize('i') - cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'i') + cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'l') a4 = cb.byptr() qsort(a1, a2, a3, a4) res = [ll_to_sort[i] for i in range(len(ll_to_sort))] @@ -896,11 +895,21 @@ b = _rawffi.Array('c').fromaddress(a.buffer, 38) if sys.maxunicode > 65535: # UCS4 build - assert b[0] == 'x' - assert b[1] == '\x00' - assert b[2] == '\x00' - assert b[3] == '\x00' - assert b[4] == 'y' + if sys.byteorder == 'big': + assert b[0] == '\x00' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == 'x' + assert b[4] == '\x00' + assert b[5] == '\x00' + assert b[6] == '\x00' + assert b[7] == 'y' + else: + assert b[0] == 'x' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == '\x00' + assert b[4] == 'y' else: # UCS2 build assert b[0] == 'x' diff --git a/pypy/module/_rawffi/test/test_struct.py b/pypy/module/_rawffi/test/test_struct.py --- 
a/pypy/module/_rawffi/test/test_struct.py +++ b/pypy/module/_rawffi/test/test_struct.py @@ -1,4 +1,4 @@ - +import sys from pypy.module._rawffi.structure import size_alignment_pos from pypy.module._rawffi.interp_rawffi import TYPEMAP, letter2tp @@ -63,4 +63,7 @@ for (name, t, size) in fields]) assert size == 8 assert pos == [0, 0, 0] - assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + if sys.byteorder == 'little': + assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + else: + assert bitsizes == [0x1003f, 0x3e0001, 0x10000] diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -733,6 +733,7 @@ try: while 1: count += cli.send(b'foobar' * 70) + assert count < 100000 except timeout: pass t.recv(count) diff --git a/pypy/module/_vmprof/conftest.py b/pypy/module/_vmprof/conftest.py new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/conftest.py @@ -0,0 +1,6 @@ +import py, platform + +def pytest_collect_directory(path, parent): + if platform.machine() == 's390x': + py.test.skip("zarch tests skipped") +pytest_collect_file = pytest_collect_directory diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -14,7 +14,7 @@ tmpfile2 = open(self.tmpfilename2, 'wb') tmpfileno2 = tmpfile2.fileno() - import struct, sys + import struct, sys, gc WORD = struct.calcsize('l') @@ -46,6 +46,8 @@ return count import _vmprof + gc.collect() # try to make the weakref list deterministic + gc.collect() # by freeing all dead code objects _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() @@ -57,6 +59,8 @@ pass """ in d + gc.collect() + gc.collect() _vmprof.enable(tmpfileno2, 0.01) exec """def foo2(): @@ -72,9 +76,9 @@ def test_enable_ovf(self): import _vmprof - 
raises(_vmprof.VMProfError, _vmprof.enable, 999, 0) - raises(_vmprof.VMProfError, _vmprof.enable, 999, -2.5) - raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300) - raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300 * 1e300) + raises(_vmprof.VMProfError, _vmprof.enable, 2, 0) + raises(_vmprof.VMProfError, _vmprof.enable, 2, -2.5) + raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300) + raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300 * 1e300) NaN = (1e300*1e300) / (1e300*1e300) - raises(_vmprof.VMProfError, _vmprof.enable, 999, NaN) + raises(_vmprof.VMProfError, _vmprof.enable, 2, NaN) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -390,7 +390,7 @@ ((dummy::cppyy_test_data*)self)->destroy_arrays(); } else if (idx == s_methods["cppyy_test_data::set_bool"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); + ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.i); } else if (idx == s_methods["cppyy_test_data::set_char"]) { assert(self && nargs == 1); ((dummy::cppyy_test_data*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1001,12 +1001,6 @@ functions = [] decls = {} pypy_decls = decls['pypy_decl.h'] = [] - pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#ifndef PYPY_STANDALONE\n") - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("extern \"C\" {") - pypy_decls.append("#endif\n") pypy_decls.append('#define Signed long /* xxx temporary fix */\n') pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') @@ -1047,11 +1041,6 @@ pypy_decls.append('#undef Signed /* xxx temporary fix */\n') 
pypy_decls.append('#undef Unsigned /* xxx temporary fix */\n') - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("}") - pypy_decls.append("#endif") - pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") - pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") for header_name, header_decls in decls.iteritems(): decl_h = udir.join(header_name) diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, @@ -134,8 +134,14 @@ if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: pass # typecheck returned "ok" without forcing 'ref' at all elif not PyString_Check(space, ref): # otherwise, use the alternate way - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsString only support strings")) + from pypy.module.cpyext.unicodeobject import ( + PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) + if PyUnicode_Check(space, ref): + ref = _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO)) + else: + raise oefmt(space.w_TypeError, + "expected string or Unicode object, %T found", + from_ref(space, ref)) ref_str = rffi.cast(PyStringObject, ref) if not ref_str.c_buffer: # copy string buffer @@ -147,8 +153,14 @@ @cpython_api([PyObject, rffi.CCHARPP, rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1) def PyString_AsStringAndSize(space, ref, buffer, length): if not PyString_Check(space, ref): - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsStringAndSize only support strings")) + from pypy.module.cpyext.unicodeobject import ( + PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) + if PyUnicode_Check(space, ref): + ref 
= _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO)) + else: + raise oefmt(space.w_TypeError, + "expected string or Unicode object, %T found", + from_ref(space, ref)) ref_str = rffi.cast(PyStringObject, ref) if not ref_str.c_buffer: # copy string buffer diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -132,7 +132,18 @@ /* Missing definitions */ #include "missing.h" -#include +/* The declarations of most API functions are generated in a separate file */ +/* Don't include them while building PyPy, RPython also generated signatures + * which are similar but not identical. */ +#ifndef PYPY_STANDALONE +#ifdef __cplusplus +extern "C" { +#endif + #include +#ifdef __cplusplus +} +#endif +#endif /* PYPY_STANDALONE */ /* Define macros for inline documentation. */ #define PyDoc_VAR(name) static char name[] diff --git a/pypy/module/cpyext/include/structmember.h b/pypy/module/cpyext/include/structmember.h --- a/pypy/module/cpyext/include/structmember.h +++ b/pypy/module/cpyext/include/structmember.h @@ -78,7 +78,11 @@ /* API functions. */ +/* Don't include them while building PyPy, RPython also generated signatures + * which are similar but not identical. 
*/ +#ifndef PYPY_STANDALONE #include "pypy_structmember_decl.h" +#endif #ifdef __cplusplus diff --git a/pypy/module/cpyext/include/unicodeobject.h b/pypy/module/cpyext/include/unicodeobject.h --- a/pypy/module/cpyext/include/unicodeobject.h +++ b/pypy/module/cpyext/include/unicodeobject.h @@ -20,8 +20,12 @@ typedef struct { PyObject_HEAD - Py_UNICODE *buffer; + Py_UNICODE *str; Py_ssize_t size; + long hash; /* Hash value; -1 if not set */ + PyObject *defenc; /* (Default) Encoded version as Python + string, or NULL; this is used for + implementing the buffer protocol */ } PyUnicodeObject; diff --git a/pypy/module/cpyext/structmember.py b/pypy/module/cpyext/structmember.py --- a/pypy/module/cpyext/structmember.py +++ b/pypy/module/cpyext/structmember.py @@ -2,7 +2,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.structmemberdefs import * -from pypy.module.cpyext.api import ADDR, PyObjectP, cpython_api +from pypy.module.cpyext.api import ADDR, PyObjectP, cpython_api, CONST_STRING from pypy.module.cpyext.intobject import PyInt_AsLong, PyInt_AsUnsignedLong from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, from_ref, make_ref @@ -34,7 +34,7 @@ _HEADER = 'pypy_structmember_decl.h' - at cpython_api([PyObject, lltype.Ptr(PyMemberDef)], PyObject, header=_HEADER) + at cpython_api([CONST_STRING, lltype.Ptr(PyMemberDef)], PyObject, header=_HEADER) def PyMember_GetOne(space, obj, w_member): addr = rffi.cast(ADDR, obj) addr += w_member.c_offset @@ -85,7 +85,7 @@ return w_result - at cpython_api([PyObject, lltype.Ptr(PyMemberDef), PyObject], rffi.INT_real, + at cpython_api([rffi.CCHARP, lltype.Ptr(PyMemberDef), PyObject], rffi.INT_real, error=-1, header=_HEADER) def PyMember_SetOne(space, obj, w_member, w_value): addr = rffi.cast(ADDR, obj) diff --git a/pypy/module/cpyext/test/test_arraymodule.py 
b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -51,13 +51,19 @@ assert arr.tolist() == [1, 23, 4] def test_buffer(self): + import sys module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) buf = buffer(arr) exc = raises(TypeError, "buf[1] = '1'") assert str(exc.value) == "buffer is read-only" - # XXX big-endian - assert str(buf) == ('\x01\0\0\0' - '\x02\0\0\0' - '\x03\0\0\0' - '\x04\0\0\0') + if sys.byteorder == 'big': + assert str(buf) == ('\0\0\0\x01' + '\0\0\0\x02' + '\0\0\0\x03' + '\0\0\0\x04') + else: + assert str(buf) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -139,6 +139,44 @@ ]) module.getstring() + def test_py_string_as_string_Unicode(self): + module = self.import_extension('foo', [ + ("getstring_unicode", "METH_NOARGS", + """ + Py_UNICODE chars[] = {'t', 'e', 's', 't'}; + PyObject* u1 = PyUnicode_FromUnicode(chars, 4); + char *buf; + buf = PyString_AsString(u1); + if (buf == NULL) + return NULL; + if (buf[3] != 't') { + PyErr_SetString(PyExc_AssertionError, "Bad conversion"); + return NULL; + } + Py_DECREF(u1); + Py_INCREF(Py_None); + return Py_None; + """), + ("getstringandsize_unicode", "METH_NOARGS", + """ + Py_UNICODE chars[] = {'t', 'e', 's', 't'}; + PyObject* u1 = PyUnicode_FromUnicode(chars, 4); + char *buf; + Py_ssize_t len; + if (PyString_AsStringAndSize(u1, &buf, &len) < 0) + return NULL; + if (len != 4) { + PyErr_SetString(PyExc_AssertionError, "Bad Length"); + return NULL; + } + Py_DECREF(u1); + Py_INCREF(Py_None); + return Py_None; + """), + ]) + module.getstring_unicode() + module.getstringandsize_unicode() + def test_format_v(self): module = self.import_extension('foo', [ ("test_string_format_v", 
"METH_VARARGS", diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -90,8 +90,10 @@ self.raises(space, api, IndexError, api.PySequence_SetItem, l, 3, w_value) + t = api.PyTuple_New(1) + api.PyTuple_SetItem(t, 0, l) self.raises(space, api, TypeError, api.PySequence_SetItem, - api.PyTuple_New(1), 0, w_value) + t, 0, w_value) self.raises(space, api, TypeError, api.PySequence_SetItem, space.newdict(), 0, w_value) diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -5,6 +5,7 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import FatalError class TestTupleObject(BaseApiTest): @@ -18,29 +19,44 @@ #assert api.PyTuple_GET_SIZE(atuple) == 3 --- now a C macro raises(TypeError, api.PyTuple_Size(space.newlist([]))) api.PyErr_Clear() - + + def test_tuple_realize_refuses_nulls(self, space, api): + py_tuple = api.PyTuple_New(1) + py.test.raises(FatalError, from_ref, space, py_tuple) + def test_tuple_resize(self, space, api): w_42 = space.wrap(42) + w_43 = space.wrap(43) + w_44 = space.wrap(44) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') py_tuple = api.PyTuple_New(3) # inside py_tuple is an array of "PyObject *" items which each hold # a reference rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) ar[0] = py_tuple api._PyTuple_Resize(ar, 2) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 2 assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + assert space.int_w(space.getitem(w_tuple, 
space.wrap(1))) == 43 api.Py_DecRef(ar[0]) py_tuple = api.PyTuple_New(3) rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[2] = make_ref(space, w_44) ar[0] = py_tuple api._PyTuple_Resize(ar, 10) + assert api.PyTuple_Size(ar[0]) == 10 + for i in range(3, 10): + rffi.cast(PyTupleObject, py_tuple).c_ob_item[i] = make_ref( + space, space.wrap(42 + i)) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 10 - assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + for i in range(10): + assert space.int_w(space.getitem(w_tuple, space.wrap(i))) == 42 + i api.Py_DecRef(ar[0]) lltype.free(ar, flavor='raw') diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -606,7 +606,7 @@ long intval; PyObject *name; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type.tp_as_number = &intlike_as_number; diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -24,7 +24,7 @@ if(PyUnicode_GetSize(s) == 11) { result = 1; } - if(s->ob_type->tp_basicsize != sizeof(void*)*5) + if(s->ob_type->tp_basicsize != sizeof(void*)*7) result = 0; Py_DECREF(s); return PyBool_FromLong(result); @@ -66,6 +66,7 @@ c = PyUnicode_AsUnicode(s); c[0] = 'a'; c[1] = 0xe9; + c[2] = 0x00; c[3] = 'c'; return s; """), @@ -74,7 +75,35 @@ assert len(s) == 4 assert s == u'a�\x00c' + def test_hash(self): + module = self.import_extension('foo', [ + ("test_hash", "METH_VARARGS", + ''' + PyObject* obj = (PyTuple_GetItem(args, 0)); + long hash = ((PyUnicodeObject*)obj)->hash; + return 
PyLong_FromLong(hash); + ''' + ), + ]) + res = module.test_hash(u"xyz") + assert res == hash(u'xyz') + def test_default_encoded_string(self): + module = self.import_extension('foo', [ + ("test_default_encoded_string", "METH_O", + ''' + PyObject* result = _PyUnicode_AsDefaultEncodedString(args, "replace"); + Py_INCREF(result); + return result; + ''' + ), + ]) + res = module.test_default_encoded_string(u"xyz") + assert isinstance(res, str) + assert res == 'xyz' + res = module.test_default_encoded_string(u"caf\xe9") + assert isinstance(res, str) + assert res == 'caf?' class TestUnicode(BaseApiTest): def test_unicodeobject(self, space, api): @@ -155,22 +184,22 @@ def test_unicode_resize(self, space, api): py_uni = new_empty_unicode(space, 10) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - py_uni.c_buffer[0] = u'a' - py_uni.c_buffer[1] = u'b' - py_uni.c_buffer[2] = u'c' + py_uni.c_str[0] = u'a' + py_uni.c_str[1] = u'b' + py_uni.c_str[2] = u'c' ar[0] = rffi.cast(PyObject, py_uni) api.PyUnicode_Resize(ar, 3) py_uni = rffi.cast(PyUnicodeObject, ar[0]) assert py_uni.c_size == 3 - assert py_uni.c_buffer[1] == u'b' - assert py_uni.c_buffer[3] == u'\x00' + assert py_uni.c_str[1] == u'b' + assert py_uni.c_str[3] == u'\x00' # the same for growing ar[0] = rffi.cast(PyObject, py_uni) api.PyUnicode_Resize(ar, 10) py_uni = rffi.cast(PyUnicodeObject, ar[0]) assert py_uni.c_size == 10 - assert py_uni.c_buffer[1] == 'b' - assert py_uni.c_buffer[10] == '\x00' + assert py_uni.c_str[1] == 'b' + assert py_uni.c_str[10] == '\x00' Py_DecRef(space, ar[0]) lltype.free(ar, flavor='raw') @@ -386,11 +415,11 @@ lltype.free(pendian, flavor='raw') test("\x61\x00\x62\x00\x63\x00\x64\x00", -1) - - test("\x61\x00\x62\x00\x63\x00\x64\x00", None) - + if sys.byteorder == 'big': + test("\x00\x61\x00\x62\x00\x63\x00\x64", None) + else: + test("\x61\x00\x62\x00\x63\x00\x64\x00", None) test("\x00\x61\x00\x62\x00\x63\x00\x64", 1) - test("\xFE\xFF\x00\x61\x00\x62\x00\x63\x00\x64", 0, 1) 
test("\xFF\xFE\x61\x00\x62\x00\x63\x00\x64\x00", 0, -1) @@ -423,7 +452,10 @@ test("\x61\x00\x00\x00\x62\x00\x00\x00", -1) - test("\x61\x00\x00\x00\x62\x00\x00\x00", None) + if sys.byteorder == 'big': + test("\x00\x00\x00\x61\x00\x00\x00\x62", None) + else: + test("\x61\x00\x00\x00\x62\x00\x00\x00", None) test("\x00\x00\x00\x61\x00\x00\x00\x62", 1) diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import fatalerror_notb from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL, build_type_checkers, PyObjectFields, cpython_struct, bootstrap_function) @@ -91,14 +92,22 @@ def tuple_realize(space, py_obj): """ Creates the tuple in the interpreter. The PyTupleObject must not - be modified after this call. + be modified after this call. We check that it does not contain + any NULLs at this point (which would correspond to half-broken + W_TupleObjects). 
""" py_tup = rffi.cast(PyTupleObject, py_obj) l = py_tup.c_ob_size p = py_tup.c_ob_item items_w = [None] * l for i in range(l): - items_w[i] = from_ref(space, p[i]) + w_item = from_ref(space, p[i]) + if w_item is None: + fatalerror_notb( + "Fatal error in cpyext, CPython compatibility layer: " + "converting a PyTupleObject into a W_TupleObject, " + "but found NULLs as items") + items_w[i] = w_item w_obj = space.newtuple(items_w) track_reference(space, py_obj, w_obj) return w_obj diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -271,17 +271,32 @@ def member_getter(self, space, w_self): assert isinstance(self, W_MemberDescr) check_descr(space, w_self, self.w_type) - return PyMember_GetOne(space, w_self, self.member) + pyref = make_ref(space, w_self) + try: + return PyMember_GetOne( + space, rffi.cast(rffi.CCHARP, pyref), self.member) + finally: + Py_DecRef(space, pyref) def member_delete(self, space, w_self): assert isinstance(self, W_MemberDescr) check_descr(space, w_self, self.w_type) - PyMember_SetOne(space, w_self, self.member, None) + pyref = make_ref(space, w_self) + try: + PyMember_SetOne( + space, rffi.cast(rffi.CCHARP, pyref), self.member, None) + finally: + Py_DecRef(space, pyref) def member_setter(self, space, w_self, w_value): assert isinstance(self, W_MemberDescr) check_descr(space, w_self, self.w_type) - PyMember_SetOne(space, w_self, self.member, w_value) + pyref = make_ref(space, w_self) + try: + PyMember_SetOne( + space, rffi.cast(rffi.CCHARP, pyref), self.member, w_value) + finally: + Py_DecRef(space, pyref) class W_PyCTypeObject(W_TypeObject): @jit.dont_look_inside diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -22,7 +22,8 @@ PyUnicodeObjectStruct = lltype.ForwardReference() PyUnicodeObject = 
lltype.Ptr(PyUnicodeObjectStruct) PyUnicodeObjectFields = (PyObjectFields + - (("buffer", rffi.CWCHARP), ("size", Py_ssize_t))) + (("str", rffi.CWCHARP), ("size", Py_ssize_t), + ("hash", rffi.LONG), ("defenc", PyObject))) cpython_struct("PyUnicodeObject", PyUnicodeObjectFields, PyUnicodeObjectStruct) @bootstrap_function @@ -54,16 +55,20 @@ buflen = length + 1 py_uni.c_size = length - py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen, - flavor='raw', zero=True, - add_memory_pressure=True) + py_uni.c_str = lltype.malloc(rffi.CWCHARP.TO, buflen, + flavor='raw', zero=True, + add_memory_pressure=True) + py_uni.c_hash = -1 + py_uni.c_defenc = lltype.nullptr(PyObject.TO) return py_uni def unicode_attach(space, py_obj, w_obj): "Fills a newly allocated PyUnicodeObject with a unicode string" py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_size = len(space.unicode_w(w_obj)) - py_unicode.c_buffer = lltype.nullptr(rffi.CWCHARP.TO) + py_unicode.c_str = lltype.nullptr(rffi.CWCHARP.TO) + py_unicode.c_hash = space.hash_w(w_obj) + py_unicode.c_defenc = lltype.nullptr(PyObject.TO) def unicode_realize(space, py_obj): """ @@ -71,17 +76,20 @@ be modified after this call. 
""" py_uni = rffi.cast(PyUnicodeObject, py_obj) - s = rffi.wcharpsize2unicode(py_uni.c_buffer, py_uni.c_size) + s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_size) w_obj = space.wrap(s) + py_uni.c_hash = space.hash_w(w_obj) track_reference(space, py_obj, w_obj) return w_obj @cpython_api([PyObject], lltype.Void, header=None) def unicode_dealloc(space, py_obj): py_unicode = rffi.cast(PyUnicodeObject, py_obj) - if py_unicode.c_buffer: - lltype.free(py_unicode.c_buffer, flavor="raw") + if py_unicode.c_str: + lltype.free(py_unicode.c_str, flavor="raw") from pypy.module.cpyext.object import PyObject_dealloc + if py_unicode.c_defenc: + PyObject_dealloc(space, py_unicode.c_defenc) PyObject_dealloc(space, py_obj) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) @@ -205,12 +213,12 @@ """Return a pointer to the internal Py_UNICODE buffer of the object. ref has to be a PyUnicodeObject (not checked).""" ref_unicode = rffi.cast(PyUnicodeObject, ref) - if not ref_unicode.c_buffer: + if not ref_unicode.c_str: # Copy unicode buffer w_unicode = from_ref(space, ref) u = space.unicode_w(w_unicode) - ref_unicode.c_buffer = rffi.unicode2wcharp(u) - return ref_unicode.c_buffer + ref_unicode.c_str = rffi.unicode2wcharp(u) + return ref_unicode.c_str @cpython_api([PyObject], rffi.CWCHARP) def PyUnicode_AsUnicode(space, ref): @@ -241,7 +249,7 @@ string may or may not be 0-terminated. 
It is the responsibility of the caller to make sure that the wchar_t string is 0-terminated in case this is required by the application.""" - c_buffer = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref)) + c_str = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref)) c_size = ref.c_size # If possible, try to copy the 0-termination as well @@ -251,7 +259,7 @@ i = 0 while i < size: - buf[i] = c_buffer[i] + buf[i] = c_str[i] i += 1 if size > c_size: @@ -343,8 +351,15 @@ return PyUnicode_FromUnicode(space, wchar_p, length) @cpython_api([PyObject, CONST_STRING], PyObject) -def _PyUnicode_AsDefaultEncodedString(space, w_unicode, errors): - return PyUnicode_AsEncodedString(space, w_unicode, lltype.nullptr(rffi.CCHARP.TO), errors) +def _PyUnicode_AsDefaultEncodedString(space, ref, errors): + # Returns a borrowed reference. + py_uni = rffi.cast(PyUnicodeObject, ref) + if not py_uni.c_defenc: + py_uni.c_defenc = make_ref( + space, PyUnicode_AsEncodedString( + space, ref, + lltype.nullptr(rffi.CCHARP.TO), errors)) + return py_uni.c_defenc @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_Decode(space, s, size, encoding, errors): @@ -444,7 +459,7 @@ def PyUnicode_Resize(space, ref, newsize): # XXX always create a new string so far py_uni = rffi.cast(PyUnicodeObject, ref[0]) - if not py_uni.c_buffer: + if not py_uni.c_str: raise OperationError(space.w_SystemError, space.wrap( "PyUnicode_Resize called on already created string")) try: @@ -458,7 +473,7 @@ if oldsize < newsize: to_cp = oldsize for i in range(to_cp): - py_newuni.c_buffer[i] = py_uni.c_buffer[i] + py_newuni.c_str[i] = py_uni.c_str[i] Py_DecRef(space, ref[0]) ref[0] = rffi.cast(PyObject, py_newuni) return 0 diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -109,7 +109,7 @@ import marshal, stat, struct, os, imp code = 
py.code.Source(p.join("x.py").read()).compile() s3 = marshal.dumps(code) - s2 = struct.pack("i", os.stat(str(p.join("x.py")))[stat.ST_MTIME]) + s2 = struct.pack("= self.ndims(): - raise oefmt(space.w_ValueError, "invalid axis for this array") - if axes_seen[axis] is True: - raise oefmt(space.w_ValueError, "repeated axis in transpose") - axes.append(axis) - axes_seen[axis] = True - return self.descr_get_transpose(space, axes) + if len(args_w) > 1: + axes = args_w + else: # Iterable in the only argument (len(arg_w) == 1 and arg_w[0] is not None) + axes = space.fixedview(args_w[0]) + axes = self._checked_axes(axes, space) + return self.descr_get_transpose(space, axes) + + def _checked_axes(self, axes_raw, space): + if len(axes_raw) != self.ndims(): + raise oefmt(space.w_ValueError, "axes don't match array") + axes = [] + axes_seen = [False] * self.ndims() + for elem in axes_raw: + try: + axis = support.index_w(space, elem) + except OperationError: + raise oefmt(space.w_TypeError, "an integer is required") + if axis < 0 or axis >= self.ndims(): + raise oefmt(space.w_ValueError, "invalid axis for this array") + if axes_seen[axis] is True: + raise oefmt(space.w_ValueError, "repeated axis in transpose") + axes.append(axis) + axes_seen[axis] = True + return axes @unwrap_spec(axis1=int, axis2=int) def descr_swapaxes(self, space, axis1, axis2): diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -54,8 +54,24 @@ assert (where(False, 1, [1, 2, 3]) == [1, 2, 3]).all() assert (where([1, 2, 3], True, False) == [True, True, True]).all() - #def test_where_1_arg(self): - # xxx + def test_where_1_arg(self): + from numpy import where, array + + result = where([1,0,1]) + + assert isinstance(result, tuple) + assert len(result) == 1 + assert (result[0] == array([0, 2])).all() + + def test_where_1_arg_2d(self): + from numpy import 
where, array + + result = where([[1,0,1],[2,-1,-1]]) + + assert isinstance(result, tuple) + assert len(result) == 2 + assert (result[0] == array([0, 0, 1, 1, 1])).all() + assert (result[1] == array([0, 2, 0, 1, 2])).all() def test_where_invalidates(self): from numpy import where, ones, zeros, array diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py From pypy.commits at gmail.com Tue Apr 12 15:32:26 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 12:32:26 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: update version to 5.1 Message-ID: <570d4d4a.654fc20a.4179b.ffffecc3@mx.google.com> Author: mattip Branch: release-5.x Changeset: r83638:9e6106e9a66b Date: 2016-04-12 21:22 +0300 http://bitbucket.org/pypy/pypy/changeset/9e6106e9a66b/ Log: update version to 5.1 diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.0.1" -#define PYPY_VERSION_NUM 0x05000100 +#define PYPY_VERSION "5.1.0" +#define PYPY_VERSION_NUM 0x05010000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. 
staying alive as long as the internal PyPy object diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (5, 0, 1, "final", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 1, 0, "final", 0) #XXX # sync patchlevel.h import pypy From pypy.commits at gmail.com Tue Apr 12 15:32:28 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 12:32:28 -0700 (PDT) Subject: [pypy-commit] pypy default: start release 5.1 Message-ID: <570d4d4c.463f1c0a.45de1.451f@mx.google.com> Author: mattip Branch: Changeset: r83639:38f00564089c Date: 2016-04-12 21:52 +0300 http://bitbucket.org/pypy/pypy/changeset/38f00564089c/ Log: start release 5.1 diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.1.0.rst release-5.0.1.rst release-5.0.0.rst release-4.0.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-5.1.0.rst whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.1.0.rst @@ -0,0 +1,102 @@ +======== +PyPy 5.1 +======== + +We have released PyPy 5.1, about two months after PyPy 5.0.1. +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata. + +We now fully support the IBM s390x architecture. 
+ +You can download the PyPy 5.1 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s960x** running Linux + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 5.0.1 released in Febuary 2015) +========================================================= + +* New features: + + * + + * + + * + +* Bug Fixes + + * + + * + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * + + * + +* Performance improvements: + + * + + * + +* Internal refactorings: + + * + + * + +.. 
_resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-5.1.0.rst rename from pypy/doc/whatsnew-head.rst rename to pypy/doc/whatsnew-5.1.0.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -1,5 +1,5 @@ ========================= -What's new in PyPy 5.0.+ +What's new in PyPy 5.1 ========================= .. this is a revision shortly after release-5.0 diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.1.0-alpha0" +#define PYPY_VERSION "5.2.0-alpha0" #define PYPY_VERSION_NUM 0x05010000 /* Defined to mean a PyPy where cpyext holds more regular references diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (5, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 2, 0, "alpha", 0) #XXX # sync patchlevel.h import pypy From pypy.commits at gmail.com Tue Apr 12 15:32:30 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 12:32:30 -0700 (PDT) Subject: [pypy-commit] pypy default: restart whatsnew Message-ID: <570d4d4e.519d1c0a.bc370.42e9@mx.google.com> Author: mattip Branch: Changeset: r83640:da2c9c9f72c1 Date: 2016-04-12 21:56 +0300 http://bitbucket.org/pypy/pypy/changeset/da2c9c9f72c1/ Log: restart whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst new file mode 100644 --- /dev/null +++ 
b/pypy/doc/whatsnew-head.rst @@ -0,0 +1,7 @@ +========================= +What's new in PyPy 5.1+ +========================= + +.. this is a revision shortly after release-5.1 +.. startrev: fb4f0a20239b + From pypy.commits at gmail.com Tue Apr 12 16:23:26 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 13:23:26 -0700 (PDT) Subject: [pypy-commit] pypy cleanup-includes: close branch to be merged Message-ID: <570d593e.858e1c0a.1ea7b.fffff36b@mx.google.com> Author: mattip Branch: cleanup-includes Changeset: r83641:29436a9bd1ef Date: 2016-04-12 23:13 +0300 http://bitbucket.org/pypy/pypy/changeset/29436a9bd1ef/ Log: close branch to be merged From pypy.commits at gmail.com Tue Apr 12 16:23:28 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 13:23:28 -0700 (PDT) Subject: [pypy-commit] pypy default: merge cleanup-includes which improves micronumpy header status Message-ID: <570d5940.d51f1c0a.ad677.ffffe9af@mx.google.com> Author: mattip Branch: Changeset: r83642:1ec97ef2b6e9 Date: 2016-04-12 23:15 +0300 http://bitbucket.org/pypy/pypy/changeset/1ec97ef2b6e9/ Log: merge cleanup-includes which improves micronumpy header status diff too long, truncating to 2000 out of 2781 lines diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -144,26 +144,14 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir, copy_numpy_headers): +def copy_header_files(dstdir): # XXX: 20 lines of code to recursively copy a directory, really?? 
assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') - for name in ("pypy_decl.h", "pypy_macros.h", "pypy_structmember_decl.h"): + for name in ["pypy_macros.h"] + FUNCTIONS_BY_HEADER.keys(): headers.append(udir.join(name)) _copy_header_files(headers, dstdir) - if copy_numpy_headers: - try: - dstdir.mkdir('numpy') - except py.error.EEXIST: - pass - numpy_dstdir = dstdir / 'numpy' - - numpy_include_dir = include_dir / 'numpy' - numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') - _copy_header_files(numpy_headers, numpy_dstdir) - - class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -231,7 +219,8 @@ wrapper.c_name = cpyext_namespace.uniquename(self.c_name) return wrapper -def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header='pypy_decl.h', +DEFAULT_HEADER = 'pypy_decl.h' +def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER, gil=None, result_borrowed=False): """ Declares a function to be exported. @@ -265,6 +254,8 @@ func_name = func.func_name if header is not None: c_name = None + assert func_name not in FUNCTIONS, ( + "%s already registered" % func_name) else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, @@ -272,10 +263,6 @@ result_borrowed=result_borrowed) func.api_func = api_function - if header is not None: - assert func_name not in FUNCTIONS, ( - "%s already registered" % func_name) - if error is _NOT_SPECIFIED: raise ValueError("function %s has no return value for exceptions" % func) @@ -363,7 +350,8 @@ unwrapper_catch = make_unwrapper(True) unwrapper_raise = make_unwrapper(False) if header is not None: - FUNCTIONS[func_name] = api_function + if header == DEFAULT_HEADER: + FUNCTIONS[func_name] = api_function FUNCTIONS_BY_HEADER.setdefault(header, {})[func_name] = api_function INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests return unwrapper_raise # used in 'normal' RPython code. 
@@ -792,10 +780,11 @@ # Structure declaration code members = [] structindex = {} - for name, func in sorted(FUNCTIONS.iteritems()): - restype, args = c_function_signature(db, func) - members.append('%s (*%s)(%s);' % (restype, name, args)) - structindex[name] = len(structindex) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + restype, args = c_function_signature(db, func) + members.append('%s (*%s)(%s);' % (restype, name, args)) + structindex[name] = len(structindex) structmembers = '\n'.join(members) struct_declaration_code = """\ struct PyPyAPI { @@ -804,7 +793,8 @@ RPY_EXTERN struct PyPyAPI* pypyAPI = &_pypyAPI; """ % dict(members=structmembers) - functions = generate_decls_and_callbacks(db, export_symbols) + functions = generate_decls_and_callbacks(db, export_symbols, + prefix='cpyexttest') global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): @@ -821,6 +811,11 @@ prologue = ("#include \n" "#include \n" "#include \n") + if use_micronumpy: + prologue = ("#include \n" + "#include \n" + "#include \n" + "#include \n") code = (prologue + struct_declaration_code + global_code + @@ -896,13 +891,19 @@ pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') # implement structure initialization code - for name, func in FUNCTIONS.iteritems(): - if name.startswith('cpyext_'): # XXX hack - continue - pypyAPI[structindex[name]] = ctypes.cast( - ll2ctypes.lltype2ctypes(func.get_llhelper(space)), - ctypes.c_void_p) - + #for name, func in FUNCTIONS.iteritems(): + # if name.startswith('cpyext_'): # XXX hack + # continue + # pypyAPI[structindex[name]] = ctypes.cast( + # ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + # ctypes.c_void_p) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + if name.startswith('cpyext_'): # XXX hack + continue + pypyAPI[structindex[name]] = ctypes.cast( + 
ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + ctypes.c_void_p) setup_va_functions(eci) setup_init_functions(eci, translating=False) @@ -995,7 +996,7 @@ pypy_macros_h = udir.join('pypy_macros.h') pypy_macros_h.write('\n'.join(pypy_macros)) -def generate_decls_and_callbacks(db, export_symbols, api_struct=True): +def generate_decls_and_callbacks(db, export_symbols, api_struct=True, prefix=''): "NOT_RPYTHON" # implement function callbacks and generate function decls functions = [] @@ -1010,19 +1011,28 @@ for header_name, header_functions in FUNCTIONS_BY_HEADER.iteritems(): if header_name not in decls: header = decls[header_name] = [] + header.append('#define Signed long /* xxx temporary fix */\n') + header.append('#define Unsigned unsigned long /* xxx temporary fix */\n') else: header = decls[header_name] for name, func in sorted(header_functions.iteritems()): + if header == DEFAULT_HEADER: + _name = name + else: + # this name is not included in pypy_macros.h + _name = mangle_name(prefix, name) + assert _name is not None, 'error converting %s' % name + header.append("#define %s %s" % (name, _name)) restype, args = c_function_signature(db, func) - header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, name, args)) + header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, _name, args)) if api_struct: callargs = ', '.join('arg%d' % (i,) for i in range(len(func.argtypes))) if func.restype is lltype.Void: - body = "{ _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ _pypyAPI.%s(%s); }" % (_name, callargs) else: - body = "{ return _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ return _pypyAPI.%s(%s); }" % (_name, callargs) functions.append('%s %s(%s)\n%s' % (restype, name, args, body)) for name in VA_TP_LIST: name_no_star = process_va_name(name) @@ -1039,8 +1049,10 @@ typ = 'PyObject*' pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) - pypy_decls.append('#undef Signed /* xxx temporary fix */\n') - pypy_decls.append('#undef Unsigned /* xxx temporary fix */\n') + for 
header_name in FUNCTIONS_BY_HEADER.keys(): + header = decls[header_name] + header.append('#undef Signed /* xxx temporary fix */\n') + header.append('#undef Unsigned /* xxx temporary fix */\n') for header_name, header_decls in decls.iteritems(): decl_h = udir.join(header_name) @@ -1147,7 +1159,8 @@ generate_macros(export_symbols, prefix='PyPy') - functions = generate_decls_and_callbacks(db, [], api_struct=False) + functions = generate_decls_and_callbacks(db, [], api_struct=False, + prefix='PyPy') code = "#include \n" + "\n".join(functions) eci = build_eci(False, export_symbols, code) @@ -1189,14 +1202,16 @@ PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **', getter_only=True, declare_as_extern=False) - for name, func in FUNCTIONS.iteritems(): - newname = mangle_name('PyPy', name) or name - deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) - deco(func.get_wrapper(space)) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, + relax=True) + deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include, use_micronumpy) + copy_header_files(trunk_include) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h deleted file mode 100644 --- a/pypy/module/cpyext/include/numpy/__multiarray_api.h +++ /dev/null @@ -1,10 +0,0 @@ - - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -#define import_array() -#define PyArray_New _PyArray_New - diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ 
b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -1,8 +1,6 @@ -/* NDArray object interface - S. H. Muller, 2013/07/26 - * It will be copied by numpy/core/setup.py by install_data to - * site-packages/numpy/core/includes/numpy -*/ +/* NDArray object interface - S. H. Muller, 2013/07/26 */ +/* For testing ndarrayobject only */ #ifndef Py_NDARRAYOBJECT_H #define Py_NDARRAYOBJECT_H @@ -10,13 +8,8 @@ extern "C" { #endif -#include "old_defines.h" #include "npy_common.h" -#include "__multiarray_api.h" - -#define NPY_UNUSED(x) x -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#include "ndarraytypes.h" /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -24,208 +17,20 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_CopyInto _PyArray_CopyInto +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define NPY_MAXDIMS 32 -#ifndef NDARRAYTYPES_H -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -/* data types copied from numpy/ndarraytypes.h - * keep numbers in sync with micronumpy.interp_dtype.DTypeCache - */ -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - /* - * New 1.6 types appended, may be integrated - * into the above in 2.0. 
- */ - NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, - - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR, /* special flag */ - NPY_USERDEF=256, /* leave room for characters */ - - /* The number of types not including the new 1.6 types */ - NPY_NTYPES_ABI_COMPATIBLE=21 -}; - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) -#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) -#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) -#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) - - -/* flags */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 -#define NPY_ARRAY_OWNDATA 0x0004 -#define NPY_ARRAY_FORCECAST 0x0010 -#define NPY_ARRAY_ENSURECOPY 0x0020 -#define NPY_ARRAY_ENSUREARRAY 0x0040 -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 -#define NPY_ARRAY_ALIGNED 0x0100 -#define NPY_ARRAY_NOTSWAPPED 0x0200 -#define NPY_ARRAY_WRITEABLE 0x0400 -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY 
(NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -#define NPY_FARRAY NPY_ARRAY_FARRAY -#define NPY_CARRAY NPY_ARRAY_CARRAY - -#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) - -#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) - -#define PyArray_ISONESEGMENT(arr) (1) -#define PyArray_ISNOTSWAPPED(arr) (1) -#define PyArray_ISBYTESWAPPED(arr) (0) - -#endif - -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CFLOAT -#define NPY_COMPLEX64 NPY_CDOUBLE - - -/* functions */ -#ifndef PyArray_NDIM - -#define PyArray_Check _PyArray_Check -#define PyArray_CheckExact 
_PyArray_CheckExact -#define PyArray_FLAGS _PyArray_FLAGS - -#define PyArray_NDIM _PyArray_NDIM -#define PyArray_DIM _PyArray_DIM -#define PyArray_STRIDE _PyArray_STRIDE -#define PyArray_SIZE _PyArray_SIZE -#define PyArray_ITEMSIZE _PyArray_ITEMSIZE -#define PyArray_NBYTES _PyArray_NBYTES -#define PyArray_TYPE _PyArray_TYPE -#define PyArray_DATA _PyArray_DATA - -#define PyArray_Size PyArray_SIZE -#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) - -#define PyArray_FromAny _PyArray_FromAny -#define PyArray_FromObject _PyArray_FromObject -#define PyArray_ContiguousFromObject PyArray_FromObject -#define PyArray_ContiguousFromAny PyArray_FromObject - -#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) -#define PyArray_FROM_OTF(obj, typenum, requirements) \ - PyArray_FromObject(obj, typenum, 0, 0) - -#define PyArray_New _PyArray_New -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData -#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning - -#define PyArray_EMPTY(nd, dims, type_num, fortran) \ - PyArray_SimpleNew(nd, dims, type_num) +/* functions defined in ndarrayobject.c*/ PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); -#define PyArray_FILLWBYTE _PyArray_FILLWBYTE -#define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto -#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) - -/* Don't use these in loops! 
*/ - -#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0))) - -#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1))) - -#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ - (k)*PyArray_STRIDE(obj,2))) - -#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ - (k)*PyArray_STRIDE(obj,2) + \ - (l)*PyArray_STRIDE(obj,3))) - -#endif #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h --- a/pypy/module/cpyext/include/numpy/ndarraytypes.h +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -1,69 +1,9 @@ #ifndef NDARRAYTYPES_H #define NDARRAYTYPES_H +/* For testing ndarrayobject only */ + #include "numpy/npy_common.h" -//#include "npy_endian.h" -//#include "npy_cpu.h" -//#include "utils.h" - -//for pypy - numpy has lots of typedefs -//for pypy - make life easier, less backward support -#define NPY_1_8_API_VERSION 0x00000008 -#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION -#undef NPY_1_8_API_VERSION - -#define NPY_ENABLE_SEPARATE_COMPILATION 1 -#define NPY_VISIBILITY_HIDDEN - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -#else - #define NPY_NO_EXPORT static -#endif - -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP - #define NPY_ALLOW_THREADS 1 -#else - #define NPY_ALLOW_THREADS 0 -#endif - - - -/* - * There are several places in the code where an array of dimensions - * is allocated statically. This is the size of that static - * allocation. 
- * - * The array creation itself could have arbitrary dimensions but all - * the places where static allocation is used would need to be changed - * to dynamic (including inside of several structures) - */ - -#define NPY_MAXDIMS 32 -#define NPY_MAXARGS 32 - -/* Used for Converter Functions "O&" code in ParseTuple */ -#define NPY_FAIL 0 -#define NPY_SUCCEED 1 - -/* - * Binary compatibility version number. This number is increased - * whenever the C-API is changed such that binary compatibility is - * broken, i.e. whenever a recompile of extension modules is needed. - */ -#define NPY_VERSION NPY_ABI_VERSION - -/* - * Minor API version. This number is increased whenever a change is - * made to the C-API -- whether it breaks binary compatibility or not. - * Some changes, such as adding a function pointer to the end of the - * function table, can be made without breaking binary compatibility. - * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) - * would be increased. Whenever binary compatibility is broken, both - * NPY_VERSION and NPY_FEATURE_VERSION should be increased. 
- */ -#define NPY_FEATURE_VERSION NPY_API_VERSION enum NPY_TYPES { NPY_BOOL=0, NPY_BYTE, NPY_UBYTE, @@ -91,18 +31,6 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; -/* basetype array priority */ -#define NPY_PRIORITY 0.0 - -/* default subtype priority */ -#define NPY_SUBTYPE_PRIORITY 1.0 - -/* default scalar priority */ -#define NPY_SCALAR_PRIORITY -1000000.0 - -/* How many floating point types are there (excluding half) */ -#define NPY_NUM_FLOATTYPE 3 - /* * These characters correspond to the array type and the struct * module @@ -157,27 +85,6 @@ }; typedef enum { - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2 -} NPY_SORTKIND; -#define NPY_NSORTS (NPY_MERGESORT + 1) - - -typedef enum { - NPY_INTROSELECT=0, -} NPY_SELECTKIND; -#define NPY_NSELECTS (NPY_INTROSELECT + 1) - - -typedef enum { - NPY_SEARCHLEFT=0, - NPY_SEARCHRIGHT=1 -} NPY_SEARCHSIDE; -#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) - - -typedef enum { NPY_NOSCALAR=-1, NPY_BOOL_SCALAR, NPY_INTPOS_SCALAR, @@ -186,7 +93,6 @@ NPY_COMPLEX_SCALAR, NPY_OBJECT_SCALAR } NPY_SCALARKIND; -#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) /* For specifying array memory layout or iteration order */ typedef enum { @@ -200,729 +106,6 @@ NPY_KEEPORDER=2 } NPY_ORDER; -/* For specifying allowed casting in operations which support it */ -typedef enum { - /* Only allow identical types */ - NPY_NO_CASTING=0, - /* Allow identical and byte swapped types */ - NPY_EQUIV_CASTING=1, - /* Only allow safe casts */ - NPY_SAFE_CASTING=2, - /* Allow safe casts or casts within the same kind */ - NPY_SAME_KIND_CASTING=3, - /* Allow any casts */ - NPY_UNSAFE_CASTING=4, - - /* - * Temporary internal definition only, will be removed in upcoming - * release, see below - * */ - NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, -} NPY_CASTING; - -typedef enum { - NPY_CLIP=0, - NPY_WRAP=1, - NPY_RAISE=2 -} NPY_CLIPMODE; - -/* The special not-a-time (NaT) value */ -#define NPY_DATETIME_NAT NPY_MIN_INT64 - -/* - * Upper bound on the length 
of a DATETIME ISO 8601 string - * YEAR: 21 (64-bit year) - * MONTH: 3 - * DAY: 3 - * HOURS: 3 - * MINUTES: 3 - * SECONDS: 3 - * ATTOSECONDS: 1 + 3*6 - * TIMEZONE: 5 - * NULL TERMINATOR: 1 - */ -#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) - -typedef enum { - NPY_FR_Y = 0, /* Years */ - NPY_FR_M = 1, /* Months */ - NPY_FR_W = 2, /* Weeks */ - /* Gap where 1.6 NPY_FR_B (value 3) was */ - NPY_FR_D = 4, /* Days */ - NPY_FR_h = 5, /* hours */ - NPY_FR_m = 6, /* minutes */ - NPY_FR_s = 7, /* seconds */ - NPY_FR_ms = 8, /* milliseconds */ - NPY_FR_us = 9, /* microseconds */ - NPY_FR_ns = 10,/* nanoseconds */ - NPY_FR_ps = 11,/* picoseconds */ - NPY_FR_fs = 12,/* femtoseconds */ - NPY_FR_as = 13,/* attoseconds */ - NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ -} NPY_DATETIMEUNIT; - -/* - * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS - * is technically one more than the actual number of units. - */ -#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) -#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC - -/* - * Business day conventions for mapping invalid business - * days to valid business days. - */ -typedef enum { - /* Go forward in time to the following business day. */ - NPY_BUSDAY_FORWARD, - NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, - /* Go backward in time to the preceding business day. */ - NPY_BUSDAY_BACKWARD, - NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, - /* - * Go forward in time to the following business day, unless it - * crosses a month boundary, in which case go backward - */ - NPY_BUSDAY_MODIFIEDFOLLOWING, - /* - * Go backward in time to the preceding business day, unless it - * crosses a month boundary, in which case go forward. - */ - NPY_BUSDAY_MODIFIEDPRECEDING, - /* Produce a NaT for non-business days. */ - NPY_BUSDAY_NAT, - /* Raise an exception for non-business days. 
*/ - NPY_BUSDAY_RAISE -} NPY_BUSDAY_ROLL; - -/************************************************************ - * NumPy Auxiliary Data for inner loops, sort functions, etc. - ************************************************************/ - -/* - * When creating an auxiliary data struct, this should always appear - * as the first member, like this: - * - * typedef struct { - * NpyAuxData base; - * double constant; - * } constant_multiplier_aux_data; - */ -typedef struct NpyAuxData_tag NpyAuxData; - -/* Function pointers for freeing or cloning auxiliary data */ -typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); -typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); - -struct NpyAuxData_tag { - NpyAuxData_FreeFunc *free; - NpyAuxData_CloneFunc *clone; - /* To allow for a bit of expansion without breaking the ABI */ - void *reserved[2]; -}; - -/* Macros to use for freeing and cloning auxiliary data */ -#define NPY_AUXDATA_FREE(auxdata) \ - do { \ - if ((auxdata) != NULL) { \ - (auxdata)->free(auxdata); \ - } \ - } while(0) -#define NPY_AUXDATA_CLONE(auxdata) \ - ((auxdata)->clone(auxdata)) - -#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); -#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); - -#define NPY_STRINGIFY(x) #x -#define NPY_TOSTRING(x) NPY_STRINGIFY(x) - - /* - * Macros to define how array, and dimension/strides data is - * allocated. 
- */ - - /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ - -#define NPY_USE_PYMEM 1 - -#if NPY_USE_PYMEM == 1 -#define PyArray_malloc PyMem_Malloc -#define PyArray_free PyMem_Free -#define PyArray_realloc PyMem_Realloc -#else -#define PyArray_malloc malloc -#define PyArray_free free -#define PyArray_realloc realloc -#endif - -/* Dimensions and strides */ -#define PyDimMem_NEW(size) \ - ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) - -#define PyDimMem_FREE(ptr) PyArray_free(ptr) - -#define PyDimMem_RENEW(ptr,size) \ - ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) - -/* forward declaration */ -struct _PyArray_Descr; - -/* These must deal with unaligned and swapped data if necessary */ -typedef PyObject * (PyArray_GetItemFunc) (void *, void *); -typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); - -typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, - npy_intp, int, void *); - -typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); -typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); - - -/* - * These assume aligned and notswapped data -- a buffer will be used - * before or contiguous data will be obtained - */ - -typedef int (PyArray_CompareFunc)(const void *, const void *, void *); -typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); - -typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, - npy_intp, void *); - -typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, - void *); - -/* - * XXX the ignore argument should be removed next time the API version - * is bumped. It used to be the separator. 
- */ -typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, - char *ignore, struct _PyArray_Descr *); -typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, - struct _PyArray_Descr *); - -typedef int (PyArray_FillFunc)(void *, npy_intp, void *); - -typedef int (PyArray_SortFunc)(void *, npy_intp, void *); -typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); -typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); -typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); - -typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); - -typedef int (PyArray_ScalarKindFunc)(void *); - -typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, - void *max, void *out); -typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, - void *values, npy_intp nv); -typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, - npy_intp nindarray, npy_intp n_outer, - npy_intp m_middle, npy_intp nelem, - NPY_CLIPMODE clipmode); - -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -typedef struct { - /* - * Functions to cast to most other standard types - * Can have some NULL entries. The types - * DATETIME, TIMEDELTA, and HALF go into the castdict - * even though they are built-in. - */ - PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; - - /* The next four functions *cannot* be NULL */ - - /* - * Functions to get and set items with standard Python types - * -- not array scalars - */ - PyArray_GetItemFunc *getitem; - PyArray_SetItemFunc *setitem; - - /* - * Copy and/or swap data. 
Memory areas may not overlap - * Use memmove first if they might - */ - PyArray_CopySwapNFunc *copyswapn; - PyArray_CopySwapFunc *copyswap; - - /* - * Function to compare items - * Can be NULL - */ - PyArray_CompareFunc *compare; - - /* - * Function to select largest - * Can be NULL - */ - PyArray_ArgFunc *argmax; - - /* - * Function to compute dot product - * Can be NULL - */ - PyArray_DotFunc *dotfunc; - - /* - * Function to scan an ASCII file and - * place a single value plus possible separator - * Can be NULL - */ - PyArray_ScanFunc *scanfunc; - - /* - * Function to read a single value from a string - * and adjust the pointer; Can be NULL - */ - PyArray_FromStrFunc *fromstr; - - /* - * Function to determine if data is zero or not - * If NULL a default version is - * used at Registration time. - */ - PyArray_NonzeroFunc *nonzero; - - /* - * Used for arange. - * Can be NULL. - */ - PyArray_FillFunc *fill; - - /* - * Function to fill arrays with scalar values - * Can be NULL - */ - PyArray_FillWithScalarFunc *fillwithscalar; - - /* - * Sorting functions - * Can be NULL - */ - PyArray_SortFunc *sort[NPY_NSORTS]; - PyArray_ArgSortFunc *argsort[NPY_NSORTS]; - - /* - * Dictionary of additional casting functions - * PyArray_VectorUnaryFuncs - * which can be populated to support casting - * to other registered types. Can be NULL - */ - PyObject *castdict; - - /* - * Functions useful for generalizing - * the casting rules. - * Can be NULL; - */ - PyArray_ScalarKindFunc *scalarkind; - int **cancastscalarkindto; - int *cancastto; - - PyArray_FastClipFunc *fastclip; - PyArray_FastPutmaskFunc *fastputmask; - PyArray_FastTakeFunc *fasttake; - - /* - * Function to select smallest - * Can be NULL - */ - PyArray_ArgFunc *argmin; - -} PyArray_ArrFuncs; - -/* The item must be reference counted when it is inserted or extracted. 
*/ -#define NPY_ITEM_REFCOUNT 0x01 -/* Same as needing REFCOUNT */ -#define NPY_ITEM_HASOBJECT 0x01 -/* Convert to list for pickling */ -#define NPY_LIST_PICKLE 0x02 -/* The item is a POINTER */ -#define NPY_ITEM_IS_POINTER 0x04 -/* memory needs to be initialized for this data-type */ -#define NPY_NEEDS_INIT 0x08 -/* operations need Python C-API so don't give-up thread. */ -#define NPY_NEEDS_PYAPI 0x10 -/* Use f.getitem when extracting elements of this data-type */ -#define NPY_USE_GETITEM 0x20 -/* Use f.setitem when setting creating 0-d array from this data-type.*/ -#define NPY_USE_SETITEM 0x40 -/* A sticky flag specifically for structured arrays */ -#define NPY_ALIGNED_STRUCT 0x80 - -/* - *These are inherited for global data-type if any data-types in the - * field have them - */ -#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ - NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) - -#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ - NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ - NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) - -#define PyDataType_FLAGCHK(dtype, flag) \ - (((dtype)->flags & (flag)) == (flag)) - -#define PyDataType_REFCHK(dtype) \ - PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) - -typedef struct _PyArray_Descr { - PyObject_HEAD - /* - * the type object representing an - * instance of this type -- should not - * be two type_numbers with the same type - * object. - */ - PyTypeObject *typeobj; - /* kind for this type */ - char kind; - /* unique-character representing this type */ - char type; - /* - * '>' (big), '<' (little), '|' - * (not-applicable), or '=' (native). 
- */ - char byteorder; - /* flags describing data type */ - char flags; - /* number representing this type */ - int type_num; - /* element size (itemsize) for this type */ - int elsize; - /* alignment needed for this type */ - int alignment; - /* - * Non-NULL if this type is - * is an array (C-contiguous) - * of some other type - */ - struct _arr_descr *subarray; - /* - * The fields dictionary for this type - * For statically defined descr this - * is always Py_None - */ - PyObject *fields; - /* - * An ordered tuple of field names or NULL - * if no fields are defined - */ - PyObject *names; - /* - * a table of functions specific for each - * basic data descriptor - */ - PyArray_ArrFuncs *f; - /* Metadata about this dtype */ - PyObject *metadata; - /* - * Metadata specific to the C implementation - * of the particular dtype. This was added - * for NumPy 1.7.0. - */ - NpyAuxData *c_metadata; -} PyArray_Descr; - -typedef struct _arr_descr { - PyArray_Descr *base; - PyObject *shape; /* a tuple */ -} PyArray_ArrayDescr; - -/* - * The main array object structure. - * - * It has been recommended to use the inline functions defined below - * (PyArray_DATA and friends) to access fields here for a number of - * releases. Direct access to the members themselves is deprecated. - * To ensure that your code does not use deprecated access, - * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION - * (or NPY_1_8_API_VERSION or higher as required). - */ -/* This struct will be moved to a private header in a future release */ -typedef struct tagPyArrayObject_fields { - PyObject_HEAD - /* Pointer to the raw data buffer */ - char *data; - /* The number of dimensions, also called 'ndim' */ - int nd; - /* The size in each dimension, also called 'shape' */ - npy_intp *dimensions; - /* - * Number of bytes to jump to get to the - * next element in each dimension - */ - npy_intp *strides; - /* - * This object is decref'd upon - * deletion of array. 
Except in the - * case of UPDATEIFCOPY which has - * special handling. - * - * For views it points to the original - * array, collapsed so no chains of - * views occur. - * - * For creation from buffer object it - * points to an object that shold be - * decref'd on deletion - * - * For UPDATEIFCOPY flag this is an - * array to-be-updated upon deletion - * of this one - */ - PyObject *base; - /* Pointer to type structure */ - PyArray_Descr *descr; - /* Flags describing array -- see below */ - int flags; - /* For weak references */ - PyObject *weakreflist; -} PyArrayObject_fields; - -/* - * To hide the implementation details, we only expose - * the Python struct HEAD. - */ -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) -/* - * Can't put this in npy_deprecated_api.h like the others. - * PyArrayObject field access is deprecated as of NumPy 1.7. - */ -typedef PyArrayObject_fields PyArrayObject; -#else -typedef struct tagPyArrayObject { - PyObject_HEAD -} PyArrayObject; -#endif - -#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) - -/* Array Flags Object */ -typedef struct PyArrayFlagsObject { - PyObject_HEAD - PyObject *arr; - int flags; -} PyArrayFlagsObject; - -/* Mirrors buffer object to ptr */ - -typedef struct { - PyObject_HEAD - PyObject *base; - void *ptr; - npy_intp len; - int flags; -} PyArray_Chunk; - -typedef struct { - NPY_DATETIMEUNIT base; - int num; -} PyArray_DatetimeMetaData; - -typedef struct { - NpyAuxData base; - PyArray_DatetimeMetaData meta; -} PyArray_DatetimeDTypeMetaData; - -/* - * This structure contains an exploded view of a date-time value. - * NaT is represented by year == NPY_DATETIME_NAT. - */ -typedef struct { - npy_int64 year; - npy_int32 month, day, hour, min, sec, us, ps, as; -} npy_datetimestruct; - -/* This is not used internally. 
*/ -typedef struct { - npy_int64 day; - npy_int32 sec, us, ps, as; -} npy_timedeltastruct; - -typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); - -/* - * Means c-style contiguous (last index varies the fastest). The data - * elements right after each other. - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 - -/* - * Set if array is a contiguous Fortran array: the first index varies - * the fastest in memory (strides array is reverse of C-contiguous - * array) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 - -/* - * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a - * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with - * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS - * at the same time if they have either zero or one element. - * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional - * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements - * and the array is contiguous if ndarray.squeeze() is contiguous. - * I.e. dimensions for which `ndarray.shape[dimension] == 1` are - * ignored. - */ - -/* - * If set, the array owns the data: it will be free'd when the array - * is deleted. - * - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_OWNDATA 0x0004 - -/* - * An array never has the next four set; they're only used as parameter - * flags to the the various FromAny functions - * - * This flag may be requested in constructor functions. - */ - -/* Cause a cast to occur regardless of whether or not it is safe. */ -#define NPY_ARRAY_FORCECAST 0x0010 - -/* - * Always copy the array. Returned arrays are always CONTIGUOUS, - * ALIGNED, and WRITEABLE. - * - * This flag may be requested in constructor functions. 
- */ -#define NPY_ARRAY_ENSURECOPY 0x0020 - -/* - * Make sure the returned array is a base-class ndarray - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ENSUREARRAY 0x0040 - -/* - * Make sure that the strides are in units of the element size Needed - * for some operations with record-arrays. - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 - -/* - * Array data is aligned on the appropiate memory address for the type - * stored according to how the compiler would align things (e.g., an - * array of integers (4 bytes each) starts on a memory address that's - * a multiple of 4) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_ALIGNED 0x0100 - -/* - * Array data has the native endianness - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_NOTSWAPPED 0x0200 - -/* - * Array data is writeable - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_WRITEABLE 0x0400 - -/* - * If this flag is set, then base contains a pointer to an array of - * the same size that should be updated with the current contents of - * this array when this array is deallocated - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 - -/* - * NOTE: there are also internal flags defined in multiarray/arrayobject.h, - * which start at bit 31 and work down. 
- */ - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -/* This flag is for the array interface, not PyArrayObject */ -#define NPY_ARR_HAS_DESCR 0x0800 - - - - -/* - * Size of internal buffers used for alignment Make BUFSIZE a multiple - * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned - */ -#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) -#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) -#define NPY_BUFSIZE 8192 -/* buffer stress test size: */ -/*#define NPY_BUFSIZE 17*/ - -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) -#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ - ((p).real < (q).real))) -#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ - ((p).real > (q).real))) -#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ - ((p).real <= (q).real))) -#define PyArray_CGE(p,q) ((((p).real==(q).real) ? 
((p).imag >= (q).imag) : \ - ((p).real >= (q).real))) -#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) -#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) /* * C API: consists of Macros and functions. The MACROS are defined @@ -937,850 +120,4 @@ #define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) #define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) -#if NPY_ALLOW_THREADS -#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; -#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); -#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ - { _save = PyEval_SaveThread();} } while (0); - -#define NPY_BEGIN_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_BEGIN_THREADS;} while (0); - -#define NPY_END_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_END_THREADS; } while (0); - -#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; -#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); -#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); -#else -#define NPY_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF -#define NPY_BEGIN_THREADS -#define NPY_END_THREADS -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) -#define NPY_BEGIN_THREADS_DESCR(dtype) -#define NPY_END_THREADS_DESCR(dtype) -#define NPY_ALLOW_C_API_DEF -#define NPY_ALLOW_C_API -#define NPY_DISABLE_C_API -#endif - -/********************************** - * The nditer object, added in 1.6 - **********************************/ - -/* The actual structure of the iterator is an internal detail */ -typedef struct NpyIter_InternalOnly NpyIter; - -/* 
Iterator function pointers that may be specialized */ -typedef int (NpyIter_IterNextFunc)(NpyIter *iter); -typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, - npy_intp *outcoords); - -/*** Global flags that may be passed to the iterator constructors ***/ - -/* Track an index representing C order */ -#define NPY_ITER_C_INDEX 0x00000001 -/* Track an index representing Fortran order */ -#define NPY_ITER_F_INDEX 0x00000002 -/* Track a multi-index */ -#define NPY_ITER_MULTI_INDEX 0x00000004 -/* User code external to the iterator does the 1-dimensional innermost loop */ -#define NPY_ITER_EXTERNAL_LOOP 0x00000008 -/* Convert all the operands to a common data type */ -#define NPY_ITER_COMMON_DTYPE 0x00000010 -/* Operands may hold references, requiring API access during iteration */ -#define NPY_ITER_REFS_OK 0x00000020 -/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ -#define NPY_ITER_ZEROSIZE_OK 0x00000040 -/* Permits reductions (size-0 stride with dimension size > 1) */ -#define NPY_ITER_REDUCE_OK 0x00000080 -/* Enables sub-range iteration */ -#define NPY_ITER_RANGED 0x00000100 -/* Enables buffering */ -#define NPY_ITER_BUFFERED 0x00000200 -/* When buffering is enabled, grows the inner loop if possible */ -#define NPY_ITER_GROWINNER 0x00000400 -/* Delay allocation of buffers until first Reset* call */ -#define NPY_ITER_DELAY_BUFALLOC 0x00000800 -/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ -#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 - -/*** Per-operand flags that may be passed to the iterator constructors ***/ - -/* The operand will be read from and written to */ -#define NPY_ITER_READWRITE 0x00010000 -/* The operand will only be read from */ -#define NPY_ITER_READONLY 0x00020000 -/* The operand will only be written to */ -#define NPY_ITER_WRITEONLY 0x00040000 -/* The operand's data must be in native byte order */ -#define NPY_ITER_NBO 0x00080000 -/* The operand's data must be aligned */ -#define 
NPY_ITER_ALIGNED 0x00100000 -/* The operand's data must be contiguous (within the inner loop) */ -#define NPY_ITER_CONTIG 0x00200000 -/* The operand may be copied to satisfy requirements */ -#define NPY_ITER_COPY 0x00400000 -/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ -#define NPY_ITER_UPDATEIFCOPY 0x00800000 -/* Allocate the operand if it is NULL */ -#define NPY_ITER_ALLOCATE 0x01000000 -/* If an operand is allocated, don't use any subtype */ -#define NPY_ITER_NO_SUBTYPE 0x02000000 -/* This is a virtual array slot, operand is NULL but temporary data is there */ -#define NPY_ITER_VIRTUAL 0x04000000 -/* Require that the dimension match the iterator dimensions exactly */ -#define NPY_ITER_NO_BROADCAST 0x08000000 -/* A mask is being used on this array, affects buffer -> array copy */ -#define NPY_ITER_WRITEMASKED 0x10000000 -/* This array is the mask for all WRITEMASKED operands */ -#define NPY_ITER_ARRAYMASK 0x20000000 - -#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff -#define NPY_ITER_PER_OP_FLAGS 0xffff0000 - - -/***************************** - * Basic iterator object - *****************************/ - -/* FWD declaration */ -typedef struct PyArrayIterObject_tag PyArrayIterObject; - -/* - * type of the function which translates a set of coordinates to a - * pointer to the data - */ -typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); - -struct PyArrayIterObject_tag { - PyObject_HEAD - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp 
limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; -} ; - - -/* Iterator API */ -#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) - -#define _PyAIT(it) ((PyArrayIterObject *)(it)) -#define PyArray_ITER_RESET(it) do { \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - memset(_PyAIT(it)->coordinates, 0, \ - (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ -} while (0) - -#define _PyArray_ITER_NEXT1(it) do { \ - (it)->dataptr += _PyAIT(it)->strides[0]; \ - (it)->coordinates[0]++; \ -} while (0) - -#define _PyArray_ITER_NEXT2(it) do { \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ -} while (0) - -#define _PyArray_ITER_NEXT3(it) do { \ - if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ - (it)->coordinates[2]++; \ - (it)->dataptr += (it)->strides[2]; \ - } \ - else { \ - (it)->coordinates[2] = 0; \ - (it)->dataptr -= (it)->backstrides[2]; \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] \ - (it)->backstrides[1]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_NEXT(it) do { \ - _PyAIT(it)->index++; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyArray_ITER_NEXT1(_PyAIT(it)); \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else if (_PyAIT(it)->nd_m1 == 1) { \ - _PyArray_ITER_NEXT2(_PyAIT(it)); \ - } \ - else { \ - int __npy_i; \ - for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ - if (_PyAIT(it)->coordinates[__npy_i] < \ - _PyAIT(it)->dims_m1[__npy_i]) { \ - _PyAIT(it)->coordinates[__npy_i]++; \ - _PyAIT(it)->dataptr += \ - 
_PyAIT(it)->strides[__npy_i]; \ - break; \ - } \ - else { \ - _PyAIT(it)->coordinates[__npy_i] = 0; \ - _PyAIT(it)->dataptr -= \ - _PyAIT(it)->backstrides[__npy_i]; \ - } \ - } \ - } \ -} while (0) - -#define PyArray_ITER_GOTO(it, destination) do { \ - int __npy_i; \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ - if (destination[__npy_i] < 0) { \ - destination[__npy_i] += \ - _PyAIT(it)->dims_m1[__npy_i]+1; \ - } \ - _PyAIT(it)->dataptr += destination[__npy_i] * \ - _PyAIT(it)->strides[__npy_i]; \ - _PyAIT(it)->coordinates[__npy_i] = \ - destination[__npy_i]; \ - _PyAIT(it)->index += destination[__npy_i] * \ - ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ - _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ - } \ -} while (0) - -#define PyArray_ITER_GOTO1D(it, ind) do { \ - int __npy_i; \ - npy_intp __npy_ind = (npy_intp) (ind); \ - if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ - _PyAIT(it)->index = __npy_ind; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * _PyAIT(it)->strides[0]; \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ - __npy_i++) { \ - _PyAIT(it)->dataptr += \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ - * _PyAIT(it)->strides[__npy_i]; \ - __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) - -#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) - - -/* - * Any object passed to PyArray_Broadcast must be binary compatible - * with this structure. 
- */ - -typedef struct { - PyObject_HEAD - int numiter; /* number of iters */ - npy_intp size; /* broadcasted size */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ -} PyArrayMultiIterObject; - -#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) -#define PyArray_MultiIter_RESET(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index = 0; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_NEXT(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index++; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_GOTO(multi, dest) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_DATA(multi, i) \ - ((void *)(_PyMIT(multi)->iters[i]->dataptr)) - -#define PyArray_MultiIter_NEXTi(multi, i) \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) - -#define PyArray_MultiIter_NOTDONE(multi) \ - (_PyMIT(multi)->index < _PyMIT(multi)->size) - -/* Store the information needed for fancy-indexing over an array */ - -typedef struct { - PyObject_HEAD - /* - * Multi-iterator portion --- needs to be present in this - * order to work with PyArray_Broadcast - */ - - int numiter; /* number of index-array - iterators */ - npy_intp size; /* size of 
broadcasted - result */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object - iterators */ - PyArrayIterObject *ait; /* flat Iterator for - underlying array */ - - /* flat iterator for subspace (when numiter < nd) */ - PyArrayIterObject *subspace; - - /* - * if subspace iteration, then this is the array of axes in - * the underlying array represented by the index objects - */ - int iteraxes[NPY_MAXDIMS]; - /* - * if subspace iteration, the these are the coordinates to the - * start of the subspace. - */ - npy_intp bscoord[NPY_MAXDIMS]; - - PyObject *indexobj; /* creating obj */ - /* - * consec is first used to indicate wether fancy indices are - * consecutive and then denotes at which axis they are inserted - */ - int consec; - char *dataptr; - -} PyArrayMapIterObject; - -enum { - NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, - NPY_NEIGHBORHOOD_ITER_ONE_PADDING, - NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, - NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, - NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING -}; - -typedef struct { - PyObject_HEAD - - /* - * PyArrayIterObject part: keep this in this exact order - */ - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; - - /* - * New members - */ - npy_intp nd; - - /* Dimensions is the dimension of the array */ - npy_intp dimensions[NPY_MAXDIMS]; - - /* - * Neighborhood points coordinates are 
computed relatively to the - * point pointed by _internal_iter - */ - PyArrayIterObject* _internal_iter; - /* - * To keep a reference to the representation of the constant value - * for constant padding - */ - char* constant; - - int mode; -} PyArrayNeighborhoodIterObject; - -/* - * Neighborhood iterator API - */ - -/* General: those work for any mode */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); -static NPY_INLINE int -PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); -#if 0 -static NPY_INLINE int -PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); -#endif - -/* - * Include inline implementations - functions defined there are not - * considered public API - */ -#define _NPY_INCLUDE_NEIGHBORHOOD_IMP -//#include "_neighborhood_iterator_imp.h" -#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP - -/* The default array type */ -#define NPY_DEFAULT_TYPE NPY_DOUBLE - -/* - * All sorts of useful ways to look into a PyArrayObject. It is recommended - * to use PyArrayObject * objects instead of always casting from PyObject *, - * for improved type checking. - * - * In many cases here the macro versions of the accessors are deprecated, - * but can't be immediately changed to inline functions because the - * preexisting macros accept PyObject * and do automatic casts. Inline - * functions accepting PyArrayObject * provides for some compile-time - * checking of correctness when working with these objects in C. - */ - -#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ - PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ - PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) - -#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ - (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) - -#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\ - NPY_ARRAY_F_CONTIGUOUS : 0)) - -#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) -/* - * Changing access macros into functions, to allow for future hiding - * of the internal memory layout. This later hiding will allow the 2.x series - * to change the internal representation of arrays without affecting - * ABI compatibility. - */ - -static NPY_INLINE int -PyArray_NDIM(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->nd; -} - -static NPY_INLINE void * -PyArray_DATA(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->data; -} - -static NPY_INLINE char * -PyArray_BYTES(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->data; -} - -static NPY_INLINE npy_intp * -PyArray_DIMS(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->dimensions; -} - -static NPY_INLINE npy_intp * -PyArray_STRIDES(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->strides; -} - -static NPY_INLINE npy_intp -PyArray_DIM(const PyArrayObject *arr, int idim) -{ - return ((PyArrayObject_fields *)arr)->dimensions[idim]; -} - -static NPY_INLINE npy_intp -PyArray_STRIDE(const PyArrayObject *arr, int istride) -{ - return ((PyArrayObject_fields *)arr)->strides[istride]; -} - -static NPY_INLINE PyObject * -PyArray_BASE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->base; -} - -static NPY_INLINE PyArray_Descr * -PyArray_DESCR(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr; -} - -static NPY_INLINE int -PyArray_FLAGS(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->flags; -} - -static NPY_INLINE npy_intp -PyArray_ITEMSIZE(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr->elsize; -} - -static NPY_INLINE int -PyArray_TYPE(const PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr->type_num; -} - -static NPY_INLINE int -PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) -{ - return (PyArray_FLAGS(arr) & 
flags) == flags; -} - -static NPY_INLINE PyObject * -PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) -{ - return ((PyArrayObject_fields *)arr)->descr->f->getitem( - (void *)itemptr, (PyArrayObject *)arr); -} - -static NPY_INLINE int -PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) -{ - return ((PyArrayObject_fields *)arr)->descr->f->setitem( - v, itemptr, arr); -} - -#else - -/* These macros are deprecated as of NumPy 1.7. */ -#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) -#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) -#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) -#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) -#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) -#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) -#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) -#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) -#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) -#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) -#define PyArray_CHKFLAGS(m, FLAGS) \ - ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) -#define PyArray_ITEMSIZE(obj) \ - (((PyArrayObject_fields *)(obj))->descr->elsize) -#define PyArray_TYPE(obj) \ - (((PyArrayObject_fields *)(obj))->descr->type_num) -#define PyArray_GETITEM(obj,itemptr) \ - PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ - (PyArrayObject *)(obj)) - -#define PyArray_SETITEM(obj,itemptr,v) \ - PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ - (char *)(itemptr), \ - (PyArrayObject *)(obj)) -#endif - -static NPY_INLINE PyArray_Descr * -PyArray_DTYPE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->descr; -} - -static NPY_INLINE npy_intp * -PyArray_SHAPE(PyArrayObject *arr) -{ - return ((PyArrayObject_fields *)arr)->dimensions; -} - -/* - * Enables the specified array flags. 
Does no checking, - * assumes you know what you're doing. - */ -static NPY_INLINE void -PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) -{ - ((PyArrayObject_fields *)arr)->flags |= flags; -} - -/* - * Clears the specified array flags. Does no checking, - * assumes you know what you're doing. - */ -static NPY_INLINE void -PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) -{ - ((PyArrayObject_fields *)arr)->flags &= ~flags; -} - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) - -#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ - ((type) == NPY_USHORT) || \ - ((type) == NPY_UINT) || \ - ((type) == NPY_ULONG) || \ - ((type) == NPY_ULONGLONG)) - -#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ - ((type) == NPY_SHORT) || \ - ((type) == NPY_INT) || \ - ((type) == NPY_LONG) || \ - ((type) == NPY_LONGLONG)) - -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) - -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) - -#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ - ((type) == NPY_HALF)) From pypy.commits at gmail.com Tue Apr 12 16:23:30 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 13:23:30 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: merge default into branch Message-ID: <570d5942.e5ecc20a.4afe2.01b6@mx.google.com> Author: mattip Branch: release-5.x Changeset: r83643:f21faa6f1e0b Date: 2016-04-12 23:16 +0300 http://bitbucket.org/pypy/pypy/changeset/f21faa6f1e0b/ Log: merge default into branch diff too long, truncating to 2000 out of 3057 lines diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. 
toctree:: + release-5.1.0.rst release-5.0.1.rst release-5.0.0.rst release-4.0.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-5.1.0.rst whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.1.0.rst @@ -0,0 +1,102 @@ +======== +PyPy 5.1 +======== + +We have released PyPy 5.1, about two months after PyPy 5.0.1. +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata. + +We now fully support the IBM s390x architecture. + +You can download the PyPy 5.1 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. 
+ +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s960x** running Linux + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 5.0.1 released in Febuary 2015) +========================================================= + +* New features: + + * + + * + + * + +* Bug Fixes + + * + + * + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * + + * + +* Performance improvements: + + * + + * + +* Internal refactorings: + + * + + * + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-5.1.0.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-5.1.0.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -1,5 +1,5 @@ ========================= -What's new in PyPy 5.0.+ +What's new in PyPy 5.1 ========================= .. this is a revision shortly after release-5.0 @@ -52,3 +52,5 @@ the ConstPtrs in a separate table, and loading from the table. It gives improved warm-up time and memory usage, and also removes annoying special-purpose code for pinned pointers. + +.. branch: fix-jitlog diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,54 +1,7 @@ ========================= -What's new in PyPy 5.0.+ +What's new in PyPy 5.1+ ========================= -.. 
this is a revision shortly after release-5.0 -.. startrev: b238b48f9138 +.. this is a revision shortly after release-5.1 +.. startrev: fb4f0a20239b -.. branch: s390x-backend - -The jit compiler backend implementation for the s390x architecutre. -The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. -It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. - -.. branch: remove-py-log - -Replace py.log with something simpler, which should speed up logging - -.. branch: where_1_arg - -Implemented numpy.where for 1 argument (thanks sergem) - -.. branch: fix_indexing_by_numpy_int - -Implement yet another strange numpy indexing compatibility; indexing by a scalar -returns a scalar - -.. branch: fix_transpose_for_list_v3 - -Allow arguments to transpose to be sequences - -.. branch: jit-leaner-frontend - -Improve the tracing speed in the frontend as well as heapcache by using a more compact representation -of traces - -.. branch: win32-lib-name - -.. branch: remove-frame-forcing-in-executioncontext - -.. branch: rposix-for-3 - -Wrap more POSIX functions in `rpython.rlib.rposix`. - -.. branch: cleanup-history-rewriting - -A local clean-up in the JIT front-end. - -.. branch: jit-constptr-2 - -Remove the forced minor collection that occurs when rewriting the -assembler at the start of the JIT backend. This is done by emitting -the ConstPtrs in a separate table, and loading from the table. It -gives improved warm-up time and memory usage, and also removes -annoying special-purpose code for pinned pointers. 
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -144,26 +144,14 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir, copy_numpy_headers): +def copy_header_files(dstdir): # XXX: 20 lines of code to recursively copy a directory, really?? assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') - for name in ("pypy_decl.h", "pypy_macros.h", "pypy_structmember_decl.h"): + for name in ["pypy_macros.h"] + FUNCTIONS_BY_HEADER.keys(): headers.append(udir.join(name)) _copy_header_files(headers, dstdir) - if copy_numpy_headers: - try: - dstdir.mkdir('numpy') - except py.error.EEXIST: - pass - numpy_dstdir = dstdir / 'numpy' - - numpy_include_dir = include_dir / 'numpy' - numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') - _copy_header_files(numpy_headers, numpy_dstdir) - - class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -231,7 +219,8 @@ wrapper.c_name = cpyext_namespace.uniquename(self.c_name) return wrapper -def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header='pypy_decl.h', +DEFAULT_HEADER = 'pypy_decl.h' +def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER, gil=None, result_borrowed=False): """ Declares a function to be exported. 
@@ -265,6 +254,8 @@ func_name = func.func_name if header is not None: c_name = None + assert func_name not in FUNCTIONS, ( + "%s already registered" % func_name) else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, @@ -272,10 +263,6 @@ result_borrowed=result_borrowed) func.api_func = api_function - if header is not None: - assert func_name not in FUNCTIONS, ( - "%s already registered" % func_name) - if error is _NOT_SPECIFIED: raise ValueError("function %s has no return value for exceptions" % func) @@ -363,7 +350,8 @@ unwrapper_catch = make_unwrapper(True) unwrapper_raise = make_unwrapper(False) if header is not None: - FUNCTIONS[func_name] = api_function + if header == DEFAULT_HEADER: + FUNCTIONS[func_name] = api_function FUNCTIONS_BY_HEADER.setdefault(header, {})[func_name] = api_function INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests return unwrapper_raise # used in 'normal' RPython code. @@ -792,10 +780,11 @@ # Structure declaration code members = [] structindex = {} - for name, func in sorted(FUNCTIONS.iteritems()): - restype, args = c_function_signature(db, func) - members.append('%s (*%s)(%s);' % (restype, name, args)) - structindex[name] = len(structindex) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + restype, args = c_function_signature(db, func) + members.append('%s (*%s)(%s);' % (restype, name, args)) + structindex[name] = len(structindex) structmembers = '\n'.join(members) struct_declaration_code = """\ struct PyPyAPI { @@ -804,7 +793,8 @@ RPY_EXTERN struct PyPyAPI* pypyAPI = &_pypyAPI; """ % dict(members=structmembers) - functions = generate_decls_and_callbacks(db, export_symbols) + functions = generate_decls_and_callbacks(db, export_symbols, + prefix='cpyexttest') global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): @@ -821,6 +811,11 @@ prologue = ("#include \n" "#include \n" "#include \n") + if use_micronumpy: + prologue 
= ("#include \n" + "#include \n" + "#include \n" + "#include \n") code = (prologue + struct_declaration_code + global_code + @@ -896,13 +891,19 @@ pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') # implement structure initialization code - for name, func in FUNCTIONS.iteritems(): - if name.startswith('cpyext_'): # XXX hack - continue - pypyAPI[structindex[name]] = ctypes.cast( - ll2ctypes.lltype2ctypes(func.get_llhelper(space)), - ctypes.c_void_p) - + #for name, func in FUNCTIONS.iteritems(): + # if name.startswith('cpyext_'): # XXX hack + # continue + # pypyAPI[structindex[name]] = ctypes.cast( + # ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + # ctypes.c_void_p) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + if name.startswith('cpyext_'): # XXX hack + continue + pypyAPI[structindex[name]] = ctypes.cast( + ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + ctypes.c_void_p) setup_va_functions(eci) setup_init_functions(eci, translating=False) @@ -995,7 +996,7 @@ pypy_macros_h = udir.join('pypy_macros.h') pypy_macros_h.write('\n'.join(pypy_macros)) -def generate_decls_and_callbacks(db, export_symbols, api_struct=True): +def generate_decls_and_callbacks(db, export_symbols, api_struct=True, prefix=''): "NOT_RPYTHON" # implement function callbacks and generate function decls functions = [] @@ -1010,19 +1011,28 @@ for header_name, header_functions in FUNCTIONS_BY_HEADER.iteritems(): if header_name not in decls: header = decls[header_name] = [] + header.append('#define Signed long /* xxx temporary fix */\n') + header.append('#define Unsigned unsigned long /* xxx temporary fix */\n') else: header = decls[header_name] for name, func in sorted(header_functions.iteritems()): + if header == DEFAULT_HEADER: + _name = name + else: + # this name is not included in pypy_macros.h + _name = mangle_name(prefix, name) + assert _name is not None, 'error converting %s' % name + 
header.append("#define %s %s" % (name, _name)) restype, args = c_function_signature(db, func) - header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, name, args)) + header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, _name, args)) if api_struct: callargs = ', '.join('arg%d' % (i,) for i in range(len(func.argtypes))) if func.restype is lltype.Void: - body = "{ _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ _pypyAPI.%s(%s); }" % (_name, callargs) else: - body = "{ return _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ return _pypyAPI.%s(%s); }" % (_name, callargs) functions.append('%s %s(%s)\n%s' % (restype, name, args, body)) for name in VA_TP_LIST: name_no_star = process_va_name(name) @@ -1039,8 +1049,10 @@ typ = 'PyObject*' pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) - pypy_decls.append('#undef Signed /* xxx temporary fix */\n') - pypy_decls.append('#undef Unsigned /* xxx temporary fix */\n') + for header_name in FUNCTIONS_BY_HEADER.keys(): + header = decls[header_name] + header.append('#undef Signed /* xxx temporary fix */\n') + header.append('#undef Unsigned /* xxx temporary fix */\n') for header_name, header_decls in decls.iteritems(): decl_h = udir.join(header_name) @@ -1147,7 +1159,8 @@ generate_macros(export_symbols, prefix='PyPy') - functions = generate_decls_and_callbacks(db, [], api_struct=False) + functions = generate_decls_and_callbacks(db, [], api_struct=False, + prefix='PyPy') code = "#include \n" + "\n".join(functions) eci = build_eci(False, export_symbols, code) @@ -1189,14 +1202,16 @@ PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **', getter_only=True, declare_as_extern=False) - for name, func in FUNCTIONS.iteritems(): - newname = mangle_name('PyPy', name) or name - deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) - deco(func.get_wrapper(space)) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + newname = mangle_name('PyPy', name) or 
name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, + relax=True) + deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include, use_micronumpy) + copy_header_files(trunk_include) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h deleted file mode 100644 --- a/pypy/module/cpyext/include/numpy/__multiarray_api.h +++ /dev/null @@ -1,10 +0,0 @@ - - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -#define import_array() -#define PyArray_New _PyArray_New - diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -1,8 +1,6 @@ -/* NDArray object interface - S. H. Muller, 2013/07/26 - * It will be copied by numpy/core/setup.py by install_data to - * site-packages/numpy/core/includes/numpy -*/ +/* NDArray object interface - S. H. 
Muller, 2013/07/26 */ +/* For testing ndarrayobject only */ #ifndef Py_NDARRAYOBJECT_H #define Py_NDARRAYOBJECT_H @@ -10,13 +8,8 @@ extern "C" { #endif -#include "old_defines.h" #include "npy_common.h" -#include "__multiarray_api.h" - -#define NPY_UNUSED(x) x -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#include "ndarraytypes.h" /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -24,208 +17,20 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_CopyInto _PyArray_CopyInto +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define NPY_MAXDIMS 32 -#ifndef NDARRAYTYPES_H -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -/* data types copied from numpy/ndarraytypes.h - * keep numbers in sync with micronumpy.interp_dtype.DTypeCache - */ -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - /* - * New 1.6 types appended, may be integrated - * into the above in 2.0. 
- */ - NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, - - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR, /* special flag */ - NPY_USERDEF=256, /* leave room for characters */ - - /* The number of types not including the new 1.6 types */ - NPY_NTYPES_ABI_COMPATIBLE=21 -}; - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) -#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) -#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) -#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) - - -/* flags */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 -#define NPY_ARRAY_OWNDATA 0x0004 -#define NPY_ARRAY_FORCECAST 0x0010 -#define NPY_ARRAY_ENSURECOPY 0x0020 -#define NPY_ARRAY_ENSUREARRAY 0x0040 -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 -#define NPY_ARRAY_ALIGNED 0x0100 -#define NPY_ARRAY_NOTSWAPPED 0x0200 -#define NPY_ARRAY_WRITEABLE 0x0400 -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY 
(NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -#define NPY_FARRAY NPY_ARRAY_FARRAY -#define NPY_CARRAY NPY_ARRAY_CARRAY - -#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) - -#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) - -#define PyArray_ISONESEGMENT(arr) (1) -#define PyArray_ISNOTSWAPPED(arr) (1) -#define PyArray_ISBYTESWAPPED(arr) (0) - -#endif - -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CFLOAT -#define NPY_COMPLEX64 NPY_CDOUBLE - - -/* functions */ -#ifndef PyArray_NDIM - -#define PyArray_Check _PyArray_Check -#define PyArray_CheckExact 
_PyArray_CheckExact -#define PyArray_FLAGS _PyArray_FLAGS - -#define PyArray_NDIM _PyArray_NDIM -#define PyArray_DIM _PyArray_DIM -#define PyArray_STRIDE _PyArray_STRIDE -#define PyArray_SIZE _PyArray_SIZE -#define PyArray_ITEMSIZE _PyArray_ITEMSIZE -#define PyArray_NBYTES _PyArray_NBYTES -#define PyArray_TYPE _PyArray_TYPE -#define PyArray_DATA _PyArray_DATA - -#define PyArray_Size PyArray_SIZE -#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) - -#define PyArray_FromAny _PyArray_FromAny -#define PyArray_FromObject _PyArray_FromObject -#define PyArray_ContiguousFromObject PyArray_FromObject -#define PyArray_ContiguousFromAny PyArray_FromObject - -#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) -#define PyArray_FROM_OTF(obj, typenum, requirements) \ - PyArray_FromObject(obj, typenum, 0, 0) - -#define PyArray_New _PyArray_New -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData -#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning - -#define PyArray_EMPTY(nd, dims, type_num, fortran) \ - PyArray_SimpleNew(nd, dims, type_num) +/* functions defined in ndarrayobject.c*/ PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); -#define PyArray_FILLWBYTE _PyArray_FILLWBYTE -#define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto -#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) - -/* Don't use these in loops! 
*/ - -#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0))) - -#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1))) - -#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ - (k)*PyArray_STRIDE(obj,2))) - -#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ - (k)*PyArray_STRIDE(obj,2) + \ - (l)*PyArray_STRIDE(obj,3))) - -#endif #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h --- a/pypy/module/cpyext/include/numpy/ndarraytypes.h +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -1,69 +1,9 @@ #ifndef NDARRAYTYPES_H #define NDARRAYTYPES_H +/* For testing ndarrayobject only */ + #include "numpy/npy_common.h" -//#include "npy_endian.h" -//#include "npy_cpu.h" -//#include "utils.h" - -//for pypy - numpy has lots of typedefs -//for pypy - make life easier, less backward support -#define NPY_1_8_API_VERSION 0x00000008 -#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION -#undef NPY_1_8_API_VERSION - -#define NPY_ENABLE_SEPARATE_COMPILATION 1 -#define NPY_VISIBILITY_HIDDEN - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -#else - #define NPY_NO_EXPORT static -#endif - -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP - #define NPY_ALLOW_THREADS 1 -#else - #define NPY_ALLOW_THREADS 0 -#endif - - - -/* - * There are several places in the code where an array of dimensions - * is allocated statically. This is the size of that static - * allocation. 
- * - * The array creation itself could have arbitrary dimensions but all - * the places where static allocation is used would need to be changed - * to dynamic (including inside of several structures) - */ - -#define NPY_MAXDIMS 32 -#define NPY_MAXARGS 32 - -/* Used for Converter Functions "O&" code in ParseTuple */ -#define NPY_FAIL 0 -#define NPY_SUCCEED 1 - -/* - * Binary compatibility version number. This number is increased - * whenever the C-API is changed such that binary compatibility is - * broken, i.e. whenever a recompile of extension modules is needed. - */ -#define NPY_VERSION NPY_ABI_VERSION - -/* - * Minor API version. This number is increased whenever a change is - * made to the C-API -- whether it breaks binary compatibility or not. - * Some changes, such as adding a function pointer to the end of the - * function table, can be made without breaking binary compatibility. - * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) - * would be increased. Whenever binary compatibility is broken, both - * NPY_VERSION and NPY_FEATURE_VERSION should be increased. 
- */ -#define NPY_FEATURE_VERSION NPY_API_VERSION enum NPY_TYPES { NPY_BOOL=0, NPY_BYTE, NPY_UBYTE, @@ -91,18 +31,6 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; -/* basetype array priority */ -#define NPY_PRIORITY 0.0 - -/* default subtype priority */ -#define NPY_SUBTYPE_PRIORITY 1.0 - -/* default scalar priority */ -#define NPY_SCALAR_PRIORITY -1000000.0 - -/* How many floating point types are there (excluding half) */ -#define NPY_NUM_FLOATTYPE 3 - /* * These characters correspond to the array type and the struct * module @@ -157,27 +85,6 @@ }; typedef enum { - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2 -} NPY_SORTKIND; -#define NPY_NSORTS (NPY_MERGESORT + 1) - - -typedef enum { - NPY_INTROSELECT=0, -} NPY_SELECTKIND; -#define NPY_NSELECTS (NPY_INTROSELECT + 1) - - -typedef enum { - NPY_SEARCHLEFT=0, - NPY_SEARCHRIGHT=1 -} NPY_SEARCHSIDE; -#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) - - -typedef enum { NPY_NOSCALAR=-1, NPY_BOOL_SCALAR, NPY_INTPOS_SCALAR, @@ -186,7 +93,6 @@ NPY_COMPLEX_SCALAR, NPY_OBJECT_SCALAR } NPY_SCALARKIND; -#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) /* For specifying array memory layout or iteration order */ typedef enum { @@ -200,729 +106,6 @@ NPY_KEEPORDER=2 } NPY_ORDER; -/* For specifying allowed casting in operations which support it */ -typedef enum { - /* Only allow identical types */ - NPY_NO_CASTING=0, - /* Allow identical and byte swapped types */ - NPY_EQUIV_CASTING=1, - /* Only allow safe casts */ - NPY_SAFE_CASTING=2, - /* Allow safe casts or casts within the same kind */ - NPY_SAME_KIND_CASTING=3, - /* Allow any casts */ - NPY_UNSAFE_CASTING=4, - - /* - * Temporary internal definition only, will be removed in upcoming - * release, see below - * */ - NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, -} NPY_CASTING; - -typedef enum { - NPY_CLIP=0, - NPY_WRAP=1, - NPY_RAISE=2 -} NPY_CLIPMODE; - -/* The special not-a-time (NaT) value */ -#define NPY_DATETIME_NAT NPY_MIN_INT64 - -/* - * Upper bound on the length 
of a DATETIME ISO 8601 string - * YEAR: 21 (64-bit year) - * MONTH: 3 - * DAY: 3 - * HOURS: 3 - * MINUTES: 3 - * SECONDS: 3 - * ATTOSECONDS: 1 + 3*6 - * TIMEZONE: 5 - * NULL TERMINATOR: 1 - */ -#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) - -typedef enum { - NPY_FR_Y = 0, /* Years */ - NPY_FR_M = 1, /* Months */ - NPY_FR_W = 2, /* Weeks */ - /* Gap where 1.6 NPY_FR_B (value 3) was */ - NPY_FR_D = 4, /* Days */ - NPY_FR_h = 5, /* hours */ - NPY_FR_m = 6, /* minutes */ - NPY_FR_s = 7, /* seconds */ - NPY_FR_ms = 8, /* milliseconds */ - NPY_FR_us = 9, /* microseconds */ - NPY_FR_ns = 10,/* nanoseconds */ - NPY_FR_ps = 11,/* picoseconds */ - NPY_FR_fs = 12,/* femtoseconds */ - NPY_FR_as = 13,/* attoseconds */ - NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ -} NPY_DATETIMEUNIT; - -/* - * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS - * is technically one more than the actual number of units. - */ -#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) -#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC - -/* - * Business day conventions for mapping invalid business - * days to valid business days. - */ -typedef enum { - /* Go forward in time to the following business day. */ - NPY_BUSDAY_FORWARD, - NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, - /* Go backward in time to the preceding business day. */ - NPY_BUSDAY_BACKWARD, - NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, - /* - * Go forward in time to the following business day, unless it - * crosses a month boundary, in which case go backward - */ - NPY_BUSDAY_MODIFIEDFOLLOWING, - /* - * Go backward in time to the preceding business day, unless it - * crosses a month boundary, in which case go forward. - */ - NPY_BUSDAY_MODIFIEDPRECEDING, - /* Produce a NaT for non-business days. */ - NPY_BUSDAY_NAT, - /* Raise an exception for non-business days. 
*/ - NPY_BUSDAY_RAISE -} NPY_BUSDAY_ROLL; - -/************************************************************ - * NumPy Auxiliary Data for inner loops, sort functions, etc. - ************************************************************/ - -/* - * When creating an auxiliary data struct, this should always appear - * as the first member, like this: - * - * typedef struct { - * NpyAuxData base; - * double constant; - * } constant_multiplier_aux_data; - */ -typedef struct NpyAuxData_tag NpyAuxData; - -/* Function pointers for freeing or cloning auxiliary data */ -typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); -typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); - -struct NpyAuxData_tag { - NpyAuxData_FreeFunc *free; - NpyAuxData_CloneFunc *clone; - /* To allow for a bit of expansion without breaking the ABI */ - void *reserved[2]; -}; - -/* Macros to use for freeing and cloning auxiliary data */ -#define NPY_AUXDATA_FREE(auxdata) \ - do { \ - if ((auxdata) != NULL) { \ - (auxdata)->free(auxdata); \ - } \ - } while(0) -#define NPY_AUXDATA_CLONE(auxdata) \ - ((auxdata)->clone(auxdata)) - -#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); -#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); - -#define NPY_STRINGIFY(x) #x -#define NPY_TOSTRING(x) NPY_STRINGIFY(x) - - /* - * Macros to define how array, and dimension/strides data is - * allocated. 
- */ - - /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ - -#define NPY_USE_PYMEM 1 - -#if NPY_USE_PYMEM == 1 -#define PyArray_malloc PyMem_Malloc -#define PyArray_free PyMem_Free -#define PyArray_realloc PyMem_Realloc -#else -#define PyArray_malloc malloc -#define PyArray_free free -#define PyArray_realloc realloc -#endif - -/* Dimensions and strides */ -#define PyDimMem_NEW(size) \ - ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) - -#define PyDimMem_FREE(ptr) PyArray_free(ptr) - -#define PyDimMem_RENEW(ptr,size) \ - ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) - -/* forward declaration */ -struct _PyArray_Descr; - -/* These must deal with unaligned and swapped data if necessary */ -typedef PyObject * (PyArray_GetItemFunc) (void *, void *); -typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); - -typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, - npy_intp, int, void *); - -typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); -typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); - - -/* - * These assume aligned and notswapped data -- a buffer will be used - * before or contiguous data will be obtained - */ - -typedef int (PyArray_CompareFunc)(const void *, const void *, void *); -typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); - -typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, - npy_intp, void *); - -typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, - void *); - -/* - * XXX the ignore argument should be removed next time the API version - * is bumped. It used to be the separator. 
- */ -typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, - char *ignore, struct _PyArray_Descr *); -typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, - struct _PyArray_Descr *); - -typedef int (PyArray_FillFunc)(void *, npy_intp, void *); - -typedef int (PyArray_SortFunc)(void *, npy_intp, void *); -typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); -typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); -typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); - -typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); - -typedef int (PyArray_ScalarKindFunc)(void *); - -typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, - void *max, void *out); -typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, - void *values, npy_intp nv); -typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, - npy_intp nindarray, npy_intp n_outer, - npy_intp m_middle, npy_intp nelem, - NPY_CLIPMODE clipmode); - -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -typedef struct { - /* - * Functions to cast to most other standard types - * Can have some NULL entries. The types - * DATETIME, TIMEDELTA, and HALF go into the castdict - * even though they are built-in. - */ - PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; - - /* The next four functions *cannot* be NULL */ - - /* - * Functions to get and set items with standard Python types - * -- not array scalars - */ - PyArray_GetItemFunc *getitem; - PyArray_SetItemFunc *setitem; - - /* - * Copy and/or swap data. 
Memory areas may not overlap - * Use memmove first if they might - */ - PyArray_CopySwapNFunc *copyswapn; - PyArray_CopySwapFunc *copyswap; - - /* - * Function to compare items - * Can be NULL - */ - PyArray_CompareFunc *compare; - - /* - * Function to select largest - * Can be NULL - */ - PyArray_ArgFunc *argmax; - - /* - * Function to compute dot product - * Can be NULL - */ - PyArray_DotFunc *dotfunc; - - /* - * Function to scan an ASCII file and - * place a single value plus possible separator - * Can be NULL - */ - PyArray_ScanFunc *scanfunc; - - /* - * Function to read a single value from a string - * and adjust the pointer; Can be NULL - */ - PyArray_FromStrFunc *fromstr; - - /* - * Function to determine if data is zero or not - * If NULL a default version is - * used at Registration time. - */ - PyArray_NonzeroFunc *nonzero; - - /* - * Used for arange. - * Can be NULL. - */ - PyArray_FillFunc *fill; - - /* - * Function to fill arrays with scalar values - * Can be NULL - */ - PyArray_FillWithScalarFunc *fillwithscalar; - - /* - * Sorting functions - * Can be NULL - */ - PyArray_SortFunc *sort[NPY_NSORTS]; - PyArray_ArgSortFunc *argsort[NPY_NSORTS]; - - /* - * Dictionary of additional casting functions - * PyArray_VectorUnaryFuncs - * which can be populated to support casting - * to other registered types. Can be NULL - */ - PyObject *castdict; - - /* - * Functions useful for generalizing - * the casting rules. - * Can be NULL; - */ - PyArray_ScalarKindFunc *scalarkind; - int **cancastscalarkindto; - int *cancastto; - - PyArray_FastClipFunc *fastclip; - PyArray_FastPutmaskFunc *fastputmask; - PyArray_FastTakeFunc *fasttake; - - /* - * Function to select smallest - * Can be NULL - */ - PyArray_ArgFunc *argmin; - -} PyArray_ArrFuncs; - -/* The item must be reference counted when it is inserted or extracted. 
*/ -#define NPY_ITEM_REFCOUNT 0x01 -/* Same as needing REFCOUNT */ -#define NPY_ITEM_HASOBJECT 0x01 -/* Convert to list for pickling */ -#define NPY_LIST_PICKLE 0x02 -/* The item is a POINTER */ -#define NPY_ITEM_IS_POINTER 0x04 -/* memory needs to be initialized for this data-type */ -#define NPY_NEEDS_INIT 0x08 -/* operations need Python C-API so don't give-up thread. */ -#define NPY_NEEDS_PYAPI 0x10 -/* Use f.getitem when extracting elements of this data-type */ -#define NPY_USE_GETITEM 0x20 -/* Use f.setitem when setting creating 0-d array from this data-type.*/ -#define NPY_USE_SETITEM 0x40 -/* A sticky flag specifically for structured arrays */ -#define NPY_ALIGNED_STRUCT 0x80 - -/* - *These are inherited for global data-type if any data-types in the - * field have them - */ -#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ - NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) - -#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ - NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ - NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) - -#define PyDataType_FLAGCHK(dtype, flag) \ - (((dtype)->flags & (flag)) == (flag)) - -#define PyDataType_REFCHK(dtype) \ - PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) - -typedef struct _PyArray_Descr { - PyObject_HEAD - /* - * the type object representing an - * instance of this type -- should not - * be two type_numbers with the same type - * object. - */ - PyTypeObject *typeobj; - /* kind for this type */ - char kind; - /* unique-character representing this type */ - char type; - /* - * '>' (big), '<' (little), '|' - * (not-applicable), or '=' (native). 
- */ - char byteorder; - /* flags describing data type */ - char flags; - /* number representing this type */ - int type_num; - /* element size (itemsize) for this type */ - int elsize; - /* alignment needed for this type */ - int alignment; - /* - * Non-NULL if this type is - * is an array (C-contiguous) - * of some other type - */ - struct _arr_descr *subarray; - /* - * The fields dictionary for this type - * For statically defined descr this - * is always Py_None - */ - PyObject *fields; - /* - * An ordered tuple of field names or NULL - * if no fields are defined - */ - PyObject *names; - /* - * a table of functions specific for each - * basic data descriptor - */ - PyArray_ArrFuncs *f; - /* Metadata about this dtype */ - PyObject *metadata; - /* - * Metadata specific to the C implementation - * of the particular dtype. This was added - * for NumPy 1.7.0. - */ - NpyAuxData *c_metadata; -} PyArray_Descr; - -typedef struct _arr_descr { - PyArray_Descr *base; - PyObject *shape; /* a tuple */ -} PyArray_ArrayDescr; - -/* - * The main array object structure. - * - * It has been recommended to use the inline functions defined below - * (PyArray_DATA and friends) to access fields here for a number of - * releases. Direct access to the members themselves is deprecated. - * To ensure that your code does not use deprecated access, - * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION - * (or NPY_1_8_API_VERSION or higher as required). - */ -/* This struct will be moved to a private header in a future release */ -typedef struct tagPyArrayObject_fields { - PyObject_HEAD - /* Pointer to the raw data buffer */ - char *data; - /* The number of dimensions, also called 'ndim' */ - int nd; - /* The size in each dimension, also called 'shape' */ - npy_intp *dimensions; - /* - * Number of bytes to jump to get to the - * next element in each dimension - */ - npy_intp *strides; - /* - * This object is decref'd upon - * deletion of array. 
Except in the - * case of UPDATEIFCOPY which has - * special handling. - * - * For views it points to the original - * array, collapsed so no chains of - * views occur. - * - * For creation from buffer object it - * points to an object that shold be - * decref'd on deletion - * - * For UPDATEIFCOPY flag this is an - * array to-be-updated upon deletion - * of this one - */ - PyObject *base; - /* Pointer to type structure */ - PyArray_Descr *descr; - /* Flags describing array -- see below */ - int flags; - /* For weak references */ - PyObject *weakreflist; -} PyArrayObject_fields; - -/* - * To hide the implementation details, we only expose - * the Python struct HEAD. - */ -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) -/* - * Can't put this in npy_deprecated_api.h like the others. - * PyArrayObject field access is deprecated as of NumPy 1.7. - */ -typedef PyArrayObject_fields PyArrayObject; -#else -typedef struct tagPyArrayObject { - PyObject_HEAD -} PyArrayObject; -#endif - -#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) - -/* Array Flags Object */ -typedef struct PyArrayFlagsObject { - PyObject_HEAD - PyObject *arr; - int flags; -} PyArrayFlagsObject; - -/* Mirrors buffer object to ptr */ - -typedef struct { - PyObject_HEAD - PyObject *base; - void *ptr; - npy_intp len; - int flags; -} PyArray_Chunk; - -typedef struct { - NPY_DATETIMEUNIT base; - int num; -} PyArray_DatetimeMetaData; - -typedef struct { - NpyAuxData base; - PyArray_DatetimeMetaData meta; -} PyArray_DatetimeDTypeMetaData; - -/* - * This structure contains an exploded view of a date-time value. - * NaT is represented by year == NPY_DATETIME_NAT. - */ -typedef struct { - npy_int64 year; - npy_int32 month, day, hour, min, sec, us, ps, as; -} npy_datetimestruct; - -/* This is not used internally. 
*/ -typedef struct { - npy_int64 day; - npy_int32 sec, us, ps, as; -} npy_timedeltastruct; - -typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); - -/* - * Means c-style contiguous (last index varies the fastest). The data - * elements right after each other. - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 - -/* - * Set if array is a contiguous Fortran array: the first index varies - * the fastest in memory (strides array is reverse of C-contiguous - * array) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 - -/* - * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a - * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with - * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS - * at the same time if they have either zero or one element. - * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional - * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements - * and the array is contiguous if ndarray.squeeze() is contiguous. - * I.e. dimensions for which `ndarray.shape[dimension] == 1` are - * ignored. - */ - -/* - * If set, the array owns the data: it will be free'd when the array - * is deleted. - * - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_OWNDATA 0x0004 - -/* - * An array never has the next four set; they're only used as parameter - * flags to the the various FromAny functions - * - * This flag may be requested in constructor functions. - */ - -/* Cause a cast to occur regardless of whether or not it is safe. */ -#define NPY_ARRAY_FORCECAST 0x0010 - -/* - * Always copy the array. Returned arrays are always CONTIGUOUS, - * ALIGNED, and WRITEABLE. - * - * This flag may be requested in constructor functions. 
- */ -#define NPY_ARRAY_ENSURECOPY 0x0020 - -/* - * Make sure the returned array is a base-class ndarray - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ENSUREARRAY 0x0040 - -/* - * Make sure that the strides are in units of the element size Needed - * for some operations with record-arrays. - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 - -/* - * Array data is aligned on the appropiate memory address for the type - * stored according to how the compiler would align things (e.g., an - * array of integers (4 bytes each) starts on a memory address that's - * a multiple of 4) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_ALIGNED 0x0100 - -/* - * Array data has the native endianness - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_NOTSWAPPED 0x0200 - -/* - * Array data is writeable - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_WRITEABLE 0x0400 - -/* - * If this flag is set, then base contains a pointer to an array of - * the same size that should be updated with the current contents of - * this array when this array is deallocated - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 - -/* - * NOTE: there are also internal flags defined in multiarray/arrayobject.h, - * which start at bit 31 and work down. 
- */ - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -/* This flag is for the array interface, not PyArrayObject */ -#define NPY_ARR_HAS_DESCR 0x0800 - - - - -/* - * Size of internal buffers used for alignment Make BUFSIZE a multiple - * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned - */ -#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) -#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) -#define NPY_BUFSIZE 8192 -/* buffer stress test size: */ -/*#define NPY_BUFSIZE 17*/ - -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) -#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ - ((p).real < (q).real))) -#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ - ((p).real > (q).real))) -#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ - ((p).real <= (q).real))) -#define PyArray_CGE(p,q) ((((p).real==(q).real) ? 
((p).imag >= (q).imag) : \ - ((p).real >= (q).real))) -#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) -#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) /* * C API: consists of Macros and functions. The MACROS are defined @@ -937,850 +120,4 @@ #define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) #define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) -#if NPY_ALLOW_THREADS -#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; -#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); -#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ - { _save = PyEval_SaveThread();} } while (0); - -#define NPY_BEGIN_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_BEGIN_THREADS;} while (0); - -#define NPY_END_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_END_THREADS; } while (0); - -#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; -#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); -#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); -#else -#define NPY_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF -#define NPY_BEGIN_THREADS -#define NPY_END_THREADS -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) -#define NPY_BEGIN_THREADS_DESCR(dtype) -#define NPY_END_THREADS_DESCR(dtype) -#define NPY_ALLOW_C_API_DEF -#define NPY_ALLOW_C_API -#define NPY_DISABLE_C_API -#endif - -/********************************** - * The nditer object, added in 1.6 - **********************************/ - -/* The actual structure of the iterator is an internal detail */ -typedef struct NpyIter_InternalOnly NpyIter; - -/* 
Iterator function pointers that may be specialized */ -typedef int (NpyIter_IterNextFunc)(NpyIter *iter); -typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, - npy_intp *outcoords); - -/*** Global flags that may be passed to the iterator constructors ***/ - -/* Track an index representing C order */ -#define NPY_ITER_C_INDEX 0x00000001 -/* Track an index representing Fortran order */ -#define NPY_ITER_F_INDEX 0x00000002 -/* Track a multi-index */ -#define NPY_ITER_MULTI_INDEX 0x00000004 -/* User code external to the iterator does the 1-dimensional innermost loop */ -#define NPY_ITER_EXTERNAL_LOOP 0x00000008 -/* Convert all the operands to a common data type */ -#define NPY_ITER_COMMON_DTYPE 0x00000010 -/* Operands may hold references, requiring API access during iteration */ -#define NPY_ITER_REFS_OK 0x00000020 -/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ -#define NPY_ITER_ZEROSIZE_OK 0x00000040 -/* Permits reductions (size-0 stride with dimension size > 1) */ -#define NPY_ITER_REDUCE_OK 0x00000080 -/* Enables sub-range iteration */ -#define NPY_ITER_RANGED 0x00000100 -/* Enables buffering */ -#define NPY_ITER_BUFFERED 0x00000200 -/* When buffering is enabled, grows the inner loop if possible */ -#define NPY_ITER_GROWINNER 0x00000400 -/* Delay allocation of buffers until first Reset* call */ -#define NPY_ITER_DELAY_BUFALLOC 0x00000800 -/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ -#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 - -/*** Per-operand flags that may be passed to the iterator constructors ***/ - -/* The operand will be read from and written to */ -#define NPY_ITER_READWRITE 0x00010000 -/* The operand will only be read from */ -#define NPY_ITER_READONLY 0x00020000 -/* The operand will only be written to */ -#define NPY_ITER_WRITEONLY 0x00040000 -/* The operand's data must be in native byte order */ -#define NPY_ITER_NBO 0x00080000 -/* The operand's data must be aligned */ -#define 
NPY_ITER_ALIGNED 0x00100000 -/* The operand's data must be contiguous (within the inner loop) */ -#define NPY_ITER_CONTIG 0x00200000 -/* The operand may be copied to satisfy requirements */ -#define NPY_ITER_COPY 0x00400000 -/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ -#define NPY_ITER_UPDATEIFCOPY 0x00800000 -/* Allocate the operand if it is NULL */ -#define NPY_ITER_ALLOCATE 0x01000000 -/* If an operand is allocated, don't use any subtype */ -#define NPY_ITER_NO_SUBTYPE 0x02000000 -/* This is a virtual array slot, operand is NULL but temporary data is there */ -#define NPY_ITER_VIRTUAL 0x04000000 -/* Require that the dimension match the iterator dimensions exactly */ -#define NPY_ITER_NO_BROADCAST 0x08000000 -/* A mask is being used on this array, affects buffer -> array copy */ -#define NPY_ITER_WRITEMASKED 0x10000000 -/* This array is the mask for all WRITEMASKED operands */ -#define NPY_ITER_ARRAYMASK 0x20000000 - -#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff -#define NPY_ITER_PER_OP_FLAGS 0xffff0000 - - -/***************************** - * Basic iterator object - *****************************/ - -/* FWD declaration */ -typedef struct PyArrayIterObject_tag PyArrayIterObject; - -/* - * type of the function which translates a set of coordinates to a - * pointer to the data - */ -typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); - -struct PyArrayIterObject_tag { - PyObject_HEAD - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp 
limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; -} ; - - -/* Iterator API */ -#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) - -#define _PyAIT(it) ((PyArrayIterObject *)(it)) -#define PyArray_ITER_RESET(it) do { \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - memset(_PyAIT(it)->coordinates, 0, \ - (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ -} while (0) - -#define _PyArray_ITER_NEXT1(it) do { \ - (it)->dataptr += _PyAIT(it)->strides[0]; \ - (it)->coordinates[0]++; \ -} while (0) - -#define _PyArray_ITER_NEXT2(it) do { \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ -} while (0) - -#define _PyArray_ITER_NEXT3(it) do { \ - if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ - (it)->coordinates[2]++; \ - (it)->dataptr += (it)->strides[2]; \ - } \ - else { \ - (it)->coordinates[2] = 0; \ - (it)->dataptr -= (it)->backstrides[2]; \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] \ - (it)->backstrides[1]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_NEXT(it) do { \ - _PyAIT(it)->index++; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyArray_ITER_NEXT1(_PyAIT(it)); \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else if (_PyAIT(it)->nd_m1 == 1) { \ - _PyArray_ITER_NEXT2(_PyAIT(it)); \ - } \ - else { \ - int __npy_i; \ - for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ - if (_PyAIT(it)->coordinates[__npy_i] < \ - _PyAIT(it)->dims_m1[__npy_i]) { \ - _PyAIT(it)->coordinates[__npy_i]++; \ - _PyAIT(it)->dataptr += \ - 
_PyAIT(it)->strides[__npy_i]; \ - break; \ - } \ - else { \ - _PyAIT(it)->coordinates[__npy_i] = 0; \ - _PyAIT(it)->dataptr -= \ - _PyAIT(it)->backstrides[__npy_i]; \ - } \ - } \ - } \ -} while (0) - -#define PyArray_ITER_GOTO(it, destination) do { \ - int __npy_i; \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ - if (destination[__npy_i] < 0) { \ - destination[__npy_i] += \ - _PyAIT(it)->dims_m1[__npy_i]+1; \ - } \ - _PyAIT(it)->dataptr += destination[__npy_i] * \ - _PyAIT(it)->strides[__npy_i]; \ - _PyAIT(it)->coordinates[__npy_i] = \ - destination[__npy_i]; \ - _PyAIT(it)->index += destination[__npy_i] * \ - ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ - _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ - } \ -} while (0) - -#define PyArray_ITER_GOTO1D(it, ind) do { \ - int __npy_i; \ - npy_intp __npy_ind = (npy_intp) (ind); \ - if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ - _PyAIT(it)->index = __npy_ind; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * _PyAIT(it)->strides[0]; \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ - __npy_i++) { \ - _PyAIT(it)->dataptr += \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ - * _PyAIT(it)->strides[__npy_i]; \ - __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) - -#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) - - -/* - * Any object passed to PyArray_Broadcast must be binary compatible - * with this structure. 
- */ - -typedef struct { - PyObject_HEAD - int numiter; /* number of iters */ - npy_intp size; /* broadcasted size */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ -} PyArrayMultiIterObject; - -#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) -#define PyArray_MultiIter_RESET(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index = 0; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_NEXT(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index++; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_GOTO(multi, dest) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} while (0) - -#define PyArray_MultiIter_DATA(multi, i) \ - ((void *)(_PyMIT(multi)->iters[i]->dataptr)) - -#define PyArray_MultiIter_NEXTi(multi, i) \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) - -#define PyArray_MultiIter_NOTDONE(multi) \ - (_PyMIT(multi)->index < _PyMIT(multi)->size) - -/* Store the information needed for fancy-indexing over an array */ - -typedef struct { - PyObject_HEAD - /* - * Multi-iterator portion --- needs to be present in this - * order to work with PyArray_Broadcast - */ - - int numiter; /* number of index-array - iterators */ - npy_intp size; /* size of 
broadcasted - result */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object - iterators */ - PyArrayIterObject *ait; /* flat Iterator for - underlying array */ - - /* flat iterator for subspace (when numiter < nd) */ - PyArrayIterObject *subspace; - - /* - * if subspace iteration, then this is the array of axes in - * the underlying array represented by the index objects - */ - int iteraxes[NPY_MAXDIMS]; - /* - * if subspace iteration, the these are the coordinates to the - * start of the subspace. - */ - npy_intp bscoord[NPY_MAXDIMS]; - - PyObject *indexobj; /* creating obj */ - /* - * consec is first used to indicate wether fancy indices are - * consecutive and then denotes at which axis they are inserted - */ - int consec; - char *dataptr; - -} PyArrayMapIterObject; - -enum { - NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, - NPY_NEIGHBORHOOD_ITER_ONE_PADDING, - NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, - NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, - NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING -}; - -typedef struct { - PyObject_HEAD - - /* - * PyArrayIterObject part: keep this in this exact order - */ - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; - - /* - * New members - */ - npy_intp nd; - - /* Dimensions is the dimension of the array */ - npy_intp dimensions[NPY_MAXDIMS]; - - /* - * Neighborhood points coordinates are 
computed relatively to the - * point pointed by _internal_iter - */ - PyArrayIterObject* _internal_iter; - /* - * To keep a reference to the representation of the constant value - * for constant padding - */ - char* constant; - - int mode; -} PyArrayNeighborhoodIterObject; - -/* - * Neighborhood iterator API - */ - -/* General: those work for any mode */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); -static NPY_INLINE int -PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); -#if 0 -static NPY_INLINE int -PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); -#endif - -/* - * Include inline implementations - functions defined there are not - * considered public API - */ -#define _NPY_INCLUDE_NEIGHBORHOOD_IMP -//#include "_neighborhood_iterator_imp.h" -#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP - -/* The default array type */ From pypy.commits at gmail.com Tue Apr 12 16:23:32 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 13:23:32 -0700 (PDT) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <570d5944.88c8c20a.221a2.fffff295@mx.google.com> Author: mattip Branch: Changeset: r83644:2180e1eaf6f6 Date: 2016-04-12 23:20 +0300 http://bitbucket.org/pypy/pypy/changeset/2180e1eaf6f6/ Log: document merged branch diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst --- a/pypy/doc/whatsnew-5.1.0.rst +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -54,3 +54,9 @@ annoying special-purpose code for pinned pointers. .. branch: fix-jitlog + +.. branch: cleanup-includes + +Remove old uneeded numpy headers, what is left is only for testing. 
Also +generate pypy_numpy.h which exposes functions to directly use micronumpy +ndarray and ufuncs From pypy.commits at gmail.com Tue Apr 12 16:23:34 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 13:23:34 -0700 (PDT) Subject: [pypy-commit] pypy default: restart whatsnew-head Message-ID: <570d5946.d3301c0a.f5fd9.4a8c@mx.google.com> Author: mattip Branch: Changeset: r83645:b7248ab6bbd8 Date: 2016-04-12 23:21 +0300 http://bitbucket.org/pypy/pypy/changeset/b7248ab6bbd8/ Log: restart whatsnew-head diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,5 @@ ========================= .. this is a revision shortly after release-5.1 -.. startrev: fb4f0a20239b +.. startrev: 2180e1eaf6f6 From pypy.commits at gmail.com Tue Apr 12 16:23:36 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 13:23:36 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: merge default into branch Message-ID: <570d5948.aa0ac20a.5e95e.1225@mx.google.com> Author: mattip Branch: release-5.x Changeset: r83646:abc514a1cfa1 Date: 2016-04-12 23:22 +0300 http://bitbucket.org/pypy/pypy/changeset/abc514a1cfa1/ Log: merge default into branch diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst --- a/pypy/doc/whatsnew-5.1.0.rst +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -54,3 +54,9 @@ annoying special-purpose code for pinned pointers. .. branch: fix-jitlog + +.. branch: cleanup-includes + +Remove old uneeded numpy headers, what is left is only for testing. Also +generate pypy_numpy.h which exposes functions to directly use micronumpy +ndarray and ufuncs diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,5 @@ ========================= .. this is a revision shortly after release-5.1 -.. startrev: fb4f0a20239b +.. 
startrev: 2180e1eaf6f6 From pypy.commits at gmail.com Wed Apr 13 00:47:59 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 12 Apr 2016 21:47:59 -0700 (PDT) Subject: [pypy-commit] pypy default: update binary version Message-ID: <570dcf7f.022ec20a.75620.ffffb1c0@mx.google.com> Author: mattip Branch: Changeset: r83647:17a05d1c445a Date: 2016-04-13 07:47 +0300 http://bitbucket.org/pypy/pypy/changeset/17a05d1c445a/ Log: update binary version diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -30,7 +30,7 @@ /* PyPy version as a string */ #define PYPY_VERSION "5.2.0-alpha0" -#define PYPY_VERSION_NUM 0x05010000 +#define PYPY_VERSION_NUM 0x05020000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object From pypy.commits at gmail.com Wed Apr 13 02:51:07 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 12 Apr 2016 23:51:07 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: extended the return value of get_location Message-ID: <570dec5b.e21bc20a.6fe4c.ffff95f2@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83648:95f36b9150e4 Date: 2016-04-13 08:50 +0200 http://bitbucket.org/pypy/pypy/changeset/95f36b9150e4/ Log: extended the return value of get_location diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -44,8 +44,12 @@ def get_location(next_instr, is_being_profiled, bytecode): from pypy.tool.stdlib_opcode import opcode_method_names name = opcode_method_names[ord(bytecode.co_code[next_instr])] + # we can probably do better at co_firstlineno? 
return (bytecode.co_filename, - )'%s #%d %s' % (bytecode.get_repr(), next_instr, name) + bytecode.co_firstlineno, + bytecode.co_name, + next_instr, + name) def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -54,19 +54,24 @@ chr((val >> 16) & 0xff), chr((val >> 24) & 0xff)]) + + at always_inline +def encode_le_64bit(val): + return ''.join([chr((val >> 0) & 0xff), + chr((val >> 8) & 0xff), + chr((val >> 16) & 0xff), + chr((val >> 24) & 0xff), + chr((val >> 32) & 0xff), + chr((val >> 40) & 0xff), + chr((val >> 48) & 0xff), + chr((val >> 56)& 0xff)]) + @always_inline def encode_le_addr(val): if IS_32_BIT: - return encode_be_32bit(val) + return encode_le_32bit(val) else: - return ''.join([chr((val >> 0) & 0xff), - chr((val >> 8) & 0xff), - chr((val >> 16) & 0xff), - chr((val >> 24) & 0xff), - chr((val >> 32) & 0xff), - chr((val >> 40) & 0xff), - chr((val >> 48) & 0xff), - chr((val >> 56)& 0xff)]) + return encode_le_64bit(val) class VMProfJitLogger(object): @@ -197,8 +202,14 @@ def encode_debug_info(self, op): log = self.logger jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] - filename, = jd_sd.warmstate.get_location(op.getarglist()[3:]) - log._write_marked(MARK_JITLOG_DEBUG_MERGE_POINT, encode_str(filename)) + filename, lineno, enclosed, index, opname = jd_sd.warmstate.get_location(op.getarglist()[3:]) + line = [] + line.append(encode_str(filename)) + line.append(encode_le_16bit(lineno)) + line.append(encode_str(enclosed)) + line.append(encode_le_64bit(index)) + line.append(encode_str(opname)) + log._write_marked(MARK_JITLOG_DEBUG_MERGE_POINT, ''.join(line)) def encode_op(self, op): diff --git a/rpython/jit/metainterp/test/test_jitlog.py b/rpython/jit/metainterp/test/test_jitlog.py --- 
a/rpython/jit/metainterp/test/test_jitlog.py +++ b/rpython/jit/metainterp/test/test_jitlog.py @@ -1,5 +1,7 @@ from rpython.jit.tool.oparser import pure_parse from rpython.jit.metainterp import jitlog +from rpython.jit.metainterp.jitlog import (encode_str, encode_le_16bit, + encode_le_64bit) from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.backend.model import AbstractCPU @@ -12,11 +14,9 @@ class FakeJitDriver(object): class warmstate(object): @staticmethod - def get_location_str(): - return 'string #3 BYTE_CODE' def get_location(greenkey_list): assert len(greenkey_list) == 0 - return '/home/pypy/jit.py', 0 + return '/home/pypy/jit.py', 0, 'enclosed', 99, 'DEL' class FakeMetaInterpSd: cpu = AbstractCPU() @@ -40,22 +40,10 @@ logger.finish() binary = file.read() assert binary.startswith(b'\x00\x04\x00\x00\x00loop') - assert binary.endswith(b'\x24\x06\x00\x00\x00string\x00\x00\x00\x00\x00\x00\x00\x00') + assert binary.endswith(b'\x24' + \ + encode_str('/home/pypy/jit.py') + \ + encode_le_16bit(0) + \ + encode_str('enclosed') + \ + encode_le_64bit(99) + \ + encode_str('DEL')) - def test_debug_merge_point(self, tmpdir): - logger = jitlog.VMProfJitLogger() - file = tmpdir.join('binary_file') - file.ensure() - fd = file.open('wb') - logger.cintf.jitlog_init(fd.fileno()) - log_trace = logger.log_trace(0, self.make_metainterp_sd(), None) - op = ResOperation(rop.DEBUG_MERGE_POINT, [ConstInt(0), ConstInt(0), ConstInt(0)]) - log_trace.write([], [op]) - #the next line will close 'fd' - fd.close() - logger.finish() - binary = file.read() - assert binary.startswith(b'\x00\x04\x00\x00\x00loop') - assert binary.endswith(b'\x24\x06\x00\x00\x00string\x00\x00\x00\x00\x00\x00\x00\x00') - - diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -564,6 +564,7 @@ def 
make_driverhook_graphs(self): s_Str = annmodel.SomeString() + s_Int = annmodel.SomeInteger() # annhelper = MixLevelHelperAnnotator(self.translator.rtyper) for jd in self.jitdrivers_sd: @@ -579,6 +580,9 @@ jd._should_unroll_one_iteration_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.should_unroll_one_iteration, annmodel.s_Bool) + s_Tuple = annmodel.SomeTuple([s_Str, s_Int, s_Str, s_Int, s_Str]) + jd._get_location_ptr = self._make_hook_graph(jd, + annhelper, jd.jitdriver.get_location, s_Tuple) annhelper.finish() def _make_hook_graph(self, jitdriver_sd, annhelper, func, diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -682,11 +682,11 @@ get_location_ptr = self.jitdriver_sd._get_location_ptr if get_location_ptr is None: missing = '(%s: no get_location)' % drivername - def get_location_str(greenkey): - return missing + def get_location(greenkey): + return (missing, 0, '', 0, '') else: unwrap_greenkey = self.make_unwrap_greenkey() - def get_location_str(greenkey): + def get_location(greenkey): greenargs = unwrap_greenkey(greenkey) fn = support.maybe_on_top_of_llinterp(rtyper, get_location_ptr) llres = fn(*greenargs) diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -605,6 +605,26 @@ can_never_inline=None, should_unroll_one_iteration=None, name='jitdriver', check_untranslated=True, vectorize=False, get_unique_id=None, is_recursive=False, get_location=None): + """ + get_location: + The return value is designed to provide enough information to express the + state of an interpreter when invoking jit_merge_point. + For a bytecode interperter such as PyPy this includes, filename, line number, + function name, and more information. However, it should also be able to express + the same state for an interpreter that evaluates an AST. + return paremter: + 0 -> filename. 
An absolute path specifying the file the interpreter invoked. + If the input source is no file it should start with the + prefix: "string://" + 1 -> line number. The line number in filename. This should at least point to + the enclosing name. It can however point to the specific + source line of the instruction executed by the interpreter. + 2 -> enclosing name. E.g. the function name. + 3 -> index. 64 bit number indicating the execution progress. It can either be + an offset to byte code, or an index to the node in an AST + 4 -> operation name. a name further describing the current program counter. + this can be either a byte code name or the name of an AST node + """ if greens is not None: self.greens = greens self.name = name From pypy.commits at gmail.com Wed Apr 13 04:17:14 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 13 Apr 2016 01:17:14 -0700 (PDT) Subject: [pypy-commit] pypy default: tested & impl pair allocation. there was an edge case which caused the allocation to fail. Message-ID: <570e008a.2457c20a.61d0d.ffffaafb@mx.google.com> Author: Richard Plangger Branch: Changeset: r83649:580534e4ba07 Date: 2016-04-13 10:11 +0200 http://bitbucket.org/pypy/pypy/changeset/580534e4ba07/ Log: tested & impl pair allocation. there was an edge case which caused the allocation to fail. diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -178,6 +178,7 @@ bind_first: the even register will be bound to bindvar, if bind_first == False: the odd register will be bound + NOTE: Calling ensure_even_odd_pair twice in a prepare function is NOT supported! 
""" self._check_type(origvar) prev_loc = self.loc(origvar, must_exist=must_exist) @@ -227,9 +228,6 @@ i = len(self.free_regs)-1 while i >= 0: even = self.free_regs[i] - if even.value == 13: - i -= 1 - continue if even.is_even(): # found an even registers that is actually free odd = r.odd_reg(even) @@ -309,11 +307,50 @@ self.reg_bindings[odd_var] = odd break else: - # no break! this is bad. really bad - raise NoVariableToSpill() + # uff! in this case, we need to move a forbidden var to another register + assert len(forbidden_vars) <= 8 # otherwise it is NOT possible to complete + even, odd = r.r2, r.r3 + even_var = reverse_mapping.get(even, None) + odd_var = reverse_mapping.get(odd, None) + if even_var: + if even_var in forbidden_vars: + self._relocate_forbidden_variable(even, even_var, reverse_mapping, + forbidden_vars, odd) + else: + self._sync_var(even_var) + if odd_var: + if odd_var in forbidden_vars: + self._relocate_forbidden_variable(odd, odd_var, reverse_mapping, + forbidden_vars, even) + else: + self._sync_var(odd_var) + + self.free_regs = [fr for fr in self.free_regs \ + if fr is not even and \ + fr is not odd] + self.reg_bindings[even_var] = even + self.reg_bindings[odd_var] = odd + return even, odd return even, odd + def _relocate_forbidden_variable(self, reg, var, reverse_mapping, forbidden_vars, forbidden_reg): + for candidate in r.MANAGED_REGS: + if candidate is reg or candidate is forbidden_reg: + continue + if candidate not in forbidden_vars: + var = reverse_mapping.get(candidate, None) + if var is not None: + self._sync_var(var) + self.assembler.regalloc_mov(reg, candidate) + self.reg_bindings[var] = candidate + reverse_mapping[reg] = var + self.free_regs.append(reg) + break + else: + raise NoVariableToSpill + + class ZARCHFrameManager(FrameManager): def __init__(self, base_ofs): FrameManager.__init__(self) diff --git a/rpython/jit/backend/zarch/test/test_regalloc.py b/rpython/jit/backend/zarch/test/test_regalloc.py --- 
a/rpython/jit/backend/zarch/test/test_regalloc.py +++ b/rpython/jit/backend/zarch/test/test_regalloc.py @@ -44,6 +44,20 @@ assert self.rm.reg_bindings[a] == r.r2 assert self.rm.reg_bindings[b] == r.r3 + def test_cannot_spill_too_many_forbidden_vars(self): + v = temp_vars(12) + a, b = v[10], v[11] + self.rm.frame_manager.bindings[a] = self.rm.frame_manager.loc(a) + self.rm.frame_manager.bindings[b] = self.rm.frame_manager.loc(b) + # all registers are allocated + self.rm.allocate((2,v[0]),(3,v[1]),(4,v[2]),(5,v[3]), + (6,v[4]),(7,v[5]),(8,v[6]),(9,v[7]), + (10,v[8]),(11,v[9])) + self.rm.temp_boxes = v[:-2] + with py.test.raises(AssertionError): + # assert len(forbidden_vars) <= 8 + self.rm.ensure_even_odd_pair(a, b, bind_first=False) + def test_all_but_one_forbidden(self): a,b,f1,f2,f3,f4,o = temp_vars(7) self.rm.allocate((2,f1),(4,f2),(6,f3),(8,f4),(10,o)) @@ -51,12 +65,6 @@ assert self.rm.reg_bindings[a] == r.r10 assert self.rm.reg_bindings[b] == r.r11 - def test_cannot_spill(self): - a,b,f1,f2,f3,f4,f5 = temp_vars(7) - self.rm.allocate((2,f1),(4,f2),(6,f3),(8,f4),(10,f5)) - with py.test.raises(NoVariableToSpill): - self.rm.force_allocate_reg_pair(a, b, [f1,f2,f3,f4,f5]) - def test_all_but_one_forbidden_odd(self): a,b,f1,f2,f3,f4,f5 = temp_vars(7) self.rm.allocate((3,f1),(5,f2),(7,f3),(9,f4),(11,f5)) @@ -86,6 +94,25 @@ assert a not in self.rm.reg_bindings assert self.rm.assembler.move_count == 2 + def test_ensure_pair_fully_allocated_first_forbidden(self): + v = temp_vars(12) + a, b = v[10], v[11] + self.rm.frame_manager.bindings[a] = self.rm.frame_manager.loc(a) + self.rm.frame_manager.bindings[b] = self.rm.frame_manager.loc(b) + # all registers are allocated + self.rm.allocate((2,v[0]),(3,v[1]),(4,v[2]),(5,v[3]), + (6,v[4]),(7,v[5]),(8,v[6]),(9,v[7]), + (10,v[8]),(11,v[9])) + self.rm.temp_boxes = [v[0],v[2],v[4],v[6],v[8]] + e, o = self.rm.ensure_even_odd_pair(a, b, bind_first=False) + assert e == r.r2 + assert o == r.r3 + + self.rm.temp_boxes = 
[v[0],v[1],v[2],v[4],v[6],v[8]] + e, o = self.rm.ensure_even_odd_pair(a, b, bind_first=False) + assert e == r.r2 + assert o == r.r3 + def run(inputargs, ops): cpu = CPU(None, None) cpu.setup_once() From pypy.commits at gmail.com Wed Apr 13 10:55:17 2016 From: pypy.commits at gmail.com (catalin_m) Date: Wed, 13 Apr 2016 07:55:17 -0700 (PDT) Subject: [pypy-commit] pypy detect_cpu_count: (catalin_m): Updated CPU detection code in rpython/config/support.py. Now returns the actual number of available cores. Added a non-linux implementation as well. Updated test. Message-ID: <570e5dd5.6614c20a.ebfb5.4ec5@mx.google.com> Author: Catalin Gabriel Manciu Branch: detect_cpu_count Changeset: r83650:b0229fe0397d Date: 2016-04-13 17:45 +0300 http://bitbucket.org/pypy/pypy/changeset/b0229fe0397d/ Log: (catalin_m): Updated CPU detection code in rpython/config/support.py. Now returns the actual number of available cores. Added a non-linux implementation as well. Updated test. diff --git a/rpython/config/support.py b/rpython/config/support.py --- a/rpython/config/support.py +++ b/rpython/config/support.py @@ -12,21 +12,21 @@ elif sys.platform.startswith('freebsd'): return sysctl_get_cpu_count('/sbin/sysctl') elif not sys.platform.startswith('linux'): - return 1 # implement me + try: + import multiprocessing + return multiprocessing.cpu_count() + except: + return 1 # try to use cpu_count on other platforms or fallback to 1 try: if isinstance(filename_or_file, str): f = open(filename_or_file, "r") else: f = filename_or_file - count = max([int(re.split('processor.*(\d+)', line)[1]) - for line in f.readlines() - if line.startswith('processor')]) + 1 - if count >= 4: - return max(count // 2, 3) - else: - return count + return max([int(re.split('processor.*?(\d+)', line)[1]) + for line in f.readlines() + if line.startswith('processor')]) + 1 # returning the actual number of available CPUs except: - return 1 # we really don't want to explode here, at worst we have 1 + return 1 # we really 
don't want to explode here, at worst we have 1 def sysctl_get_cpu_count(cmd, name='hw.ncpu'): try: diff --git a/rpython/config/test/test_support.py b/rpython/config/test/test_support.py --- a/rpython/config/test/test_support.py +++ b/rpython/config/test/test_support.py @@ -45,7 +45,7 @@ saved = os.environ try: os.environ = FakeEnviron(None) - assert support.detect_number_of_processors(StringIO(cpuinfo)) == 3 + assert support.detect_number_of_processors(StringIO(cpuinfo)) == 4 assert support.detect_number_of_processors('random crap that does not exist') == 1 os.environ = FakeEnviron('-j2') assert support.detect_number_of_processors(StringIO(cpuinfo)) == 1 From pypy.commits at gmail.com Wed Apr 13 12:43:52 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 13 Apr 2016 09:43:52 -0700 (PDT) Subject: [pypy-commit] pypy default: document changes between 5.0 and this release Message-ID: <570e7748.858e1c0a.c6973.ffffcaae@mx.google.com> Author: mattip Branch: Changeset: r83651:7b80713a6225 Date: 2016-04-13 19:42 +0300 http://bitbucket.org/pypy/pypy/changeset/7b80713a6225/ Log: document changes between 5.0 and this release diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -2,12 +2,11 @@ PyPy 5.1 ======== -We have released PyPy 5.1, about two months after PyPy 5.0.1. +We have released PyPy 5.1, about a month after PyPy 5.0. We encourage all users of PyPy to update to this version. Apart from the usual bug fixes, there is an ongoing effort to improve the warmup time and memory -usage of JIT-related metadata. - -We now fully support the IBM s390x architecture. +usage of JIT-related metadata, and we now fully support the IBM s390x +architecture. You can download the PyPy 5.1 release here: @@ -52,22 +51,46 @@ .. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. 
_`dynamic languages`: http://pypyjs.org -Other Highlights (since 5.0.1 released in Febuary 2015) +Other Highlights (since 5.0 released in March 2015) ========================================================= * New features: - * + * A new jit backend for the IBM s390x, which was a large effort over the past + few months. - * + * Add better support for PyUnicodeObject in the C-API compatibility layer - * + * Support GNU/kFreeBSD Debian ports in vmprof + + * Add __pypy__._promote + + * Make attrgetter a single type for CPython compatibility * Bug Fixes - * + * Catch exceptions raised in an exit function - * + * Fix a corner case in the JIT + + * Fix edge cases in the cpyext refcounting-compatible semantics + + * Try harder to not emit NEON instructions on ARM processors without NEON + support + + * Support glibc < 2.16 on ARM + + * Improve the rpython posix module system interaction function calls + + * Detect a missing class function implementation instead of calling a random + function + + * Check that PyTupleObjects do not contain any NULLs at the + point of conversion to W_TupleObjects + + * In ctypes, fix _anonymous_ fields of instances + + * Fix JIT issue with unpack() on a Trace which contains half-written operations * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at @@ -75,21 +98,32 @@ * Numpy: - * + * Implemented numpy.where for a single argument - * + * Indexing by a numpy scalar now returns a scalar + + * Fix transpose(arg) when arg is a sequence + + * Refactor include file handling, now all numpy ndarray, ufunc, and umath + functions exported from libpypy.so are declared in pypy_numpy.h, which is + included only when building our fork of numpy * Performance improvements: - * + * Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting - * + * Merge another round of improvements to the warmup performance + + * Cleanup history 
rewriting in pyjitpl + + * Remove the forced minor collection that occurs when rewriting the + assembler at the start of the JIT backend * Internal refactorings: - * + * Use a simpler logger to speed up translation - * + * Drop vestiges of Python 2.5 support in testing .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html .. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html From pypy.commits at gmail.com Wed Apr 13 13:28:34 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 13 Apr 2016 10:28:34 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Merged in marky1991/pypy_new/33_fix_itertools (pull request #425) Message-ID: <570e81c2.2976c20a.11f84.ffff8d9e@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83655:a26ea4a8b7fd Date: 2016-04-13 18:27 +0100 http://bitbucket.org/pypy/pypy/changeset/a26ea4a8b7fd/ Log: Merged in marky1991/pypy_new/33_fix_itertools (pull request #425) Py3k: Fix bug in itertools PR. diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1117,7 +1117,7 @@ class W_Product(W_Root): def __init__(self, space, args_w, w_repeat): self.gears = [ - space.unpackiterable(arg_w) for arg_w in args_w + space.unpackiterable(arg_w)[:] for arg_w in args_w ] * space.int_w(w_repeat) # for gear in self.gears: @@ -1508,6 +1508,7 @@ else: self.started = True return w_result + def descr_reduce(self, space): if self.raised_stop_iteration: pool_w = [] @@ -1609,14 +1610,14 @@ self.w_total = self.space.call_function(self.w_func, self.w_total, w_value) return self.w_total - def descr_reduce(self): + def reduce_w(self): space = self.space w_total = space.w_None if self.w_total is None else self.w_total w_func = space.w_None if self.w_func is None else self.w_func return space.newtuple([space.gettypefor(W_Accumulate), space.newtuple([self.w_iterable, w_func]), w_total]) - def 
descr_setstate(self, space, w_state): + def setstate_w(self, space, w_state): self.w_total = w_state if not space.is_w(w_state, space.w_None) else None def W_Accumulate__new__(space, w_subtype, w_iterable, w_func=None): @@ -1628,8 +1629,8 @@ __new__ = interp2app(W_Accumulate__new__), __iter__ = interp2app(W_Accumulate.iter_w), __next__ = interp2app(W_Accumulate.next_w), - __reduce__ = interp2app(W_Accumulate.descr_reduce), - __setstate__ = interp2app(W_Accumulate.descr_setstate), + __reduce__ = interp2app(W_Accumulate.reduce_w), + __setstate__ = interp2app(W_Accumulate.setstate_w), __doc__ = """\ "accumulate(iterable) --> accumulate object From pypy.commits at gmail.com Wed Apr 13 13:28:43 2016 From: pypy.commits at gmail.com (marky1991) Date: Wed, 13 Apr 2016 10:28:43 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Undo unnecessary name change. Message-ID: <570e81cb.63dfc20a.74bb8.2f61@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r83654:46b9381a3b80 Date: 2016-04-12 19:55 -0400 http://bitbucket.org/pypy/pypy/changeset/46b9381a3b80/ Log: Undo unnecessary name change. 
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1607,14 +1607,14 @@ self.w_total = self.space.call_function(self.w_func, self.w_total, w_value) return self.w_total - def descr_reduce(self): + def reduce_w(self): space = self.space w_total = space.w_None if self.w_total is None else self.w_total w_func = space.w_None if self.w_func is None else self.w_func return space.newtuple([space.gettypefor(W_Accumulate), space.newtuple([self.w_iterable, w_func]), w_total]) - def descr_setstate(self, space, w_state): + def setstate_w(self, space, w_state): self.w_total = w_state if not space.is_w(w_state, space.w_None) else None def W_Accumulate__new__(space, w_subtype, w_iterable, w_func=None): @@ -1626,8 +1626,8 @@ __new__ = interp2app(W_Accumulate__new__), __iter__ = interp2app(W_Accumulate.iter_w), __next__ = interp2app(W_Accumulate.next_w), - __reduce__ = interp2app(W_Accumulate.descr_reduce), - __setstate__ = interp2app(W_Accumulate.descr_setstate), + __reduce__ = interp2app(W_Accumulate.reduce_w), + __setstate__ = interp2app(W_Accumulate.setstate_w), __doc__ = """\ "accumulate(iterable) --> accumulate object From pypy.commits at gmail.com Wed Apr 13 13:28:39 2016 From: pypy.commits at gmail.com (marky1991) Date: Wed, 13 Apr 2016 10:28:39 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: That 'unnecessary' change was required to make translation not fail. Add it back. Message-ID: <570e81c7.a12dc20a.361e2.6569@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r83652:ca1b4e9652e6 Date: 2016-04-12 16:43 -0400 http://bitbucket.org/pypy/pypy/changeset/ca1b4e9652e6/ Log: That 'unnecessary' change was required to make translation not fail. Add it back. 
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1114,7 +1114,7 @@ class W_Product(W_Root): def __init__(self, space, args_w, w_repeat): self.gears = [ - space.unpackiterable(arg_w) for arg_w in args_w + space.unpackiterable(arg_w)[:] for arg_w in args_w ] * space.int_w(w_repeat) # for gear in self.gears: From pypy.commits at gmail.com Wed Apr 13 13:28:41 2016 From: pypy.commits at gmail.com (marky1991) Date: Wed, 13 Apr 2016 10:28:41 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Add newlines to match PEP8 and as requested. Message-ID: <570e81c9.519d1c0a.bc370.ffffe101@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r83653:2822467985c4 Date: 2016-04-12 16:56 -0400 http://bitbucket.org/pypy/pypy/changeset/2822467985c4/ Log: Add newlines to match PEP8 and as requested. diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1199,6 +1199,7 @@ space.newtuple([space.newtuple([])]) ] return space.newtuple(result_w) + def descr_setstate(self, space, w_state): gear_count = len(self.gears) indices_w = space.unpackiterable(w_state) @@ -1504,6 +1505,7 @@ else: self.started = True return w_result + def descr_reduce(self, space): if self.raised_stop_iteration: pool_w = [] @@ -1525,6 +1527,7 @@ space.wrap(self.started) ])] return space.newtuple(result_w) + def descr_setstate(self, space, w_state): state = space.unpackiterable(w_state) if len(state) == 3: From pypy.commits at gmail.com Wed Apr 13 14:43:25 2016 From: pypy.commits at gmail.com (amauryfa) Date: Wed, 13 Apr 2016 11:43:25 -0700 (PDT) Subject: [pypy-commit] pypy ast-arena: Add an 'Arena' object to the constructor of all AST nodes. 
Message-ID: <570e934d.83561c0a.1f2da.ffffee89@mx.google.com> Author: Amaury Forgeot d'Arc Branch: ast-arena Changeset: r83656:8e2938af872d Date: 2016-04-13 08:25 +0200 http://bitbucket.org/pypy/pypy/changeset/8e2938af872d/ Log: Add an 'Arena' object to the constructor of all AST nodes. The goal is to allocate the nodes inside the arena, outside of the GC, and deallocate all of them at the same time, in order to reduce gc pressure. Not used so far. diff too long, truncating to 2000 out of 3469 lines diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -159,26 +159,31 @@ return space.fromcache(State) class mod(AST): + + def __init__(self, arena): + pass + @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): if space.is_w(w_node, space.w_None): return None if space.isinstance_w(w_node, get(space).w_Module): - return Module.from_object(space, w_node) + return Module.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Interactive): - return Interactive.from_object(space, w_node) + return Interactive.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Expression): - return Expression.from_object(space, w_node) + return Expression.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Suite): - return Suite.from_object(space, w_node) + return Suite.from_object(space, arena, w_node) raise oefmt(space.w_TypeError, "Expected mod node, got %T", w_node) State.ast_type('mod', 'AST', None, []) class Module(mod): - def __init__(self, body): + def __init__(self, arena, body): self.body = body + mod.__init__(self, arena) def walkabout(self, visitor): visitor.visit_Module(self) @@ -199,19 +204,20 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_body = get_field(space, w_node, 'body', False) body_w = 
space.unpackiterable(w_body) - _body = [stmt.from_object(space, w_item) for w_item in body_w] - return Module(_body) + _body = [stmt.from_object(space, arena, w_item) for w_item in body_w] + return Module(arena, _body) State.ast_type('Module', 'mod', ['body']) class Interactive(mod): - def __init__(self, body): + def __init__(self, arena, body): self.body = body + mod.__init__(self, arena) def walkabout(self, visitor): visitor.visit_Interactive(self) @@ -232,19 +238,20 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_body = get_field(space, w_node, 'body', False) body_w = space.unpackiterable(w_body) - _body = [stmt.from_object(space, w_item) for w_item in body_w] - return Interactive(_body) + _body = [stmt.from_object(space, arena, w_item) for w_item in body_w] + return Interactive(arena, _body) State.ast_type('Interactive', 'mod', ['body']) class Expression(mod): - def __init__(self, body): + def __init__(self, arena, body): self.body = body + mod.__init__(self, arena) def walkabout(self, visitor): visitor.visit_Expression(self) @@ -260,18 +267,19 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_body = get_field(space, w_node, 'body', False) - _body = expr.from_object(space, w_body) - return Expression(_body) + _body = expr.from_object(space, arena, w_body) + return Expression(arena, _body) State.ast_type('Expression', 'mod', ['body']) class Suite(mod): - def __init__(self, body): + def __init__(self, arena, body): self.body = body + mod.__init__(self, arena) def walkabout(self, visitor): visitor.visit_Suite(self) @@ -292,83 +300,83 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_body = get_field(space, w_node, 'body', False) body_w = space.unpackiterable(w_body) - _body = [stmt.from_object(space, w_item) for w_item in body_w] - return Suite(_body) + _body = [stmt.from_object(space, 
arena, w_item) for w_item in body_w] + return Suite(arena, _body) State.ast_type('Suite', 'mod', ['body']) class stmt(AST): - def __init__(self, lineno, col_offset): + def __init__(self, arena, lineno, col_offset): self.lineno = lineno self.col_offset = col_offset @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): if space.is_w(w_node, space.w_None): return None if space.isinstance_w(w_node, get(space).w_FunctionDef): - return FunctionDef.from_object(space, w_node) + return FunctionDef.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_ClassDef): - return ClassDef.from_object(space, w_node) + return ClassDef.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Return): - return Return.from_object(space, w_node) + return Return.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Delete): - return Delete.from_object(space, w_node) + return Delete.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Assign): - return Assign.from_object(space, w_node) + return Assign.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_AugAssign): - return AugAssign.from_object(space, w_node) + return AugAssign.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Print): - return Print.from_object(space, w_node) + return Print.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_For): - return For.from_object(space, w_node) + return For.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_While): - return While.from_object(space, w_node) + return While.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_If): - return If.from_object(space, w_node) + return If.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_With): - return With.from_object(space, w_node) + return With.from_object(space, arena, w_node) if 
space.isinstance_w(w_node, get(space).w_Raise): - return Raise.from_object(space, w_node) + return Raise.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_TryExcept): - return TryExcept.from_object(space, w_node) + return TryExcept.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_TryFinally): - return TryFinally.from_object(space, w_node) + return TryFinally.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Assert): - return Assert.from_object(space, w_node) + return Assert.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Import): - return Import.from_object(space, w_node) + return Import.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_ImportFrom): - return ImportFrom.from_object(space, w_node) + return ImportFrom.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Exec): - return Exec.from_object(space, w_node) + return Exec.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Global): - return Global.from_object(space, w_node) + return Global.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Expr): - return Expr.from_object(space, w_node) + return Expr.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Pass): - return Pass.from_object(space, w_node) + return Pass.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Break): - return Break.from_object(space, w_node) + return Break.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Continue): - return Continue.from_object(space, w_node) + return Continue.from_object(space, arena, w_node) raise oefmt(space.w_TypeError, "Expected stmt node, got %T", w_node) State.ast_type('stmt', 'AST', None, ['lineno', 'col_offset']) class FunctionDef(stmt): - def __init__(self, name, args, body, decorator_list, lineno, col_offset): + def 
__init__(self, arena, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args self.body = body self.decorator_list = decorator_list - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_FunctionDef(self) @@ -406,7 +414,7 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_name = get_field(space, w_node, 'name', False) w_args = get_field(space, w_node, 'args', False) w_body = get_field(space, w_node, 'body', False) @@ -414,26 +422,26 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _name = space.realstr_w(w_name) - _args = arguments.from_object(space, w_args) + _args = arguments.from_object(space, arena, w_args) body_w = space.unpackiterable(w_body) - _body = [stmt.from_object(space, w_item) for w_item in body_w] + _body = [stmt.from_object(space, arena, w_item) for w_item in body_w] decorator_list_w = space.unpackiterable(w_decorator_list) - _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w] + _decorator_list = [expr.from_object(space, arena, w_item) for w_item in decorator_list_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return FunctionDef(_name, _args, _body, _decorator_list, _lineno, _col_offset) + return FunctionDef(arena, _name, _args, _body, _decorator_list, _lineno, _col_offset) State.ast_type('FunctionDef', 'stmt', ['name', 'args', 'body', 'decorator_list']) class ClassDef(stmt): - def __init__(self, name, bases, body, decorator_list, lineno, col_offset): + def __init__(self, arena, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases self.body = body self.decorator_list = decorator_list - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): 
visitor.visit_ClassDef(self) @@ -476,7 +484,7 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_name = get_field(space, w_node, 'name', False) w_bases = get_field(space, w_node, 'bases', False) w_body = get_field(space, w_node, 'body', False) @@ -485,23 +493,23 @@ w_col_offset = get_field(space, w_node, 'col_offset', False) _name = space.realstr_w(w_name) bases_w = space.unpackiterable(w_bases) - _bases = [expr.from_object(space, w_item) for w_item in bases_w] + _bases = [expr.from_object(space, arena, w_item) for w_item in bases_w] body_w = space.unpackiterable(w_body) - _body = [stmt.from_object(space, w_item) for w_item in body_w] + _body = [stmt.from_object(space, arena, w_item) for w_item in body_w] decorator_list_w = space.unpackiterable(w_decorator_list) - _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w] + _decorator_list = [expr.from_object(space, arena, w_item) for w_item in decorator_list_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return ClassDef(_name, _bases, _body, _decorator_list, _lineno, _col_offset) + return ClassDef(arena, _name, _bases, _body, _decorator_list, _lineno, _col_offset) State.ast_type('ClassDef', 'stmt', ['name', 'bases', 'body', 'decorator_list']) class Return(stmt): - def __init__(self, value, lineno, col_offset): + def __init__(self, arena, value, lineno, col_offset): self.value = value - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Return(self) @@ -522,23 +530,23 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_value = get_field(space, w_node, 'value', True) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _value = expr.from_object(space, w_value) + _value = expr.from_object(space, arena, w_value) 
_lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Return(_value, _lineno, _col_offset) + return Return(arena, _value, _lineno, _col_offset) State.ast_type('Return', 'stmt', ['value']) class Delete(stmt): - def __init__(self, targets, lineno, col_offset): + def __init__(self, arena, targets, lineno, col_offset): self.targets = targets - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Delete(self) @@ -563,25 +571,25 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_targets = get_field(space, w_node, 'targets', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) targets_w = space.unpackiterable(w_targets) - _targets = [expr.from_object(space, w_item) for w_item in targets_w] + _targets = [expr.from_object(space, arena, w_item) for w_item in targets_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Delete(_targets, _lineno, _col_offset) + return Delete(arena, _targets, _lineno, _col_offset) State.ast_type('Delete', 'stmt', ['targets']) class Assign(stmt): - def __init__(self, targets, value, lineno, col_offset): + def __init__(self, arena, targets, value, lineno, col_offset): self.targets = targets self.value = value - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Assign(self) @@ -609,28 +617,28 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_targets = get_field(space, w_node, 'targets', False) w_value = get_field(space, w_node, 'value', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) targets_w = space.unpackiterable(w_targets) - _targets = [expr.from_object(space, w_item) for 
w_item in targets_w] - _value = expr.from_object(space, w_value) + _targets = [expr.from_object(space, arena, w_item) for w_item in targets_w] + _value = expr.from_object(space, arena, w_value) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Assign(_targets, _value, _lineno, _col_offset) + return Assign(arena, _targets, _value, _lineno, _col_offset) State.ast_type('Assign', 'stmt', ['targets', 'value']) class AugAssign(stmt): - def __init__(self, target, op, value, lineno, col_offset): + def __init__(self, arena, target, op, value, lineno, col_offset): self.target = target self.op = op self.value = value - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_AugAssign(self) @@ -655,29 +663,29 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_target = get_field(space, w_node, 'target', False) w_op = get_field(space, w_node, 'op', False) w_value = get_field(space, w_node, 'value', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _target = expr.from_object(space, w_target) - _op = operator.from_object(space, w_op) - _value = expr.from_object(space, w_value) + _target = expr.from_object(space, arena, w_target) + _op = operator.from_object(space, arena, w_op) + _value = expr.from_object(space, arena, w_value) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return AugAssign(_target, _op, _value, _lineno, _col_offset) + return AugAssign(arena, _target, _op, _value, _lineno, _col_offset) State.ast_type('AugAssign', 'stmt', ['target', 'op', 'value']) class Print(stmt): - def __init__(self, dest, values, nl, lineno, col_offset): + def __init__(self, arena, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values self.nl = nl - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, 
arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Print(self) @@ -708,31 +716,31 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_dest = get_field(space, w_node, 'dest', True) w_values = get_field(space, w_node, 'values', False) w_nl = get_field(space, w_node, 'nl', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _dest = expr.from_object(space, w_dest) + _dest = expr.from_object(space, arena, w_dest) values_w = space.unpackiterable(w_values) - _values = [expr.from_object(space, w_item) for w_item in values_w] + _values = [expr.from_object(space, arena, w_item) for w_item in values_w] _nl = space.bool_w(w_nl) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Print(_dest, _values, _nl, _lineno, _col_offset) + return Print(arena, _dest, _values, _nl, _lineno, _col_offset) State.ast_type('Print', 'stmt', ['dest', 'values', 'nl']) class For(stmt): - def __init__(self, target, iter, body, orelse, lineno, col_offset): + def __init__(self, arena, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter self.body = body self.orelse = orelse - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_For(self) @@ -771,33 +779,33 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_target = get_field(space, w_node, 'target', False) w_iter = get_field(space, w_node, 'iter', False) w_body = get_field(space, w_node, 'body', False) w_orelse = get_field(space, w_node, 'orelse', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _target = expr.from_object(space, w_target) - _iter = expr.from_object(space, w_iter) + _target = expr.from_object(space, arena, w_target) + 
_iter = expr.from_object(space, arena, w_iter) body_w = space.unpackiterable(w_body) - _body = [stmt.from_object(space, w_item) for w_item in body_w] + _body = [stmt.from_object(space, arena, w_item) for w_item in body_w] orelse_w = space.unpackiterable(w_orelse) - _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _orelse = [stmt.from_object(space, arena, w_item) for w_item in orelse_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return For(_target, _iter, _body, _orelse, _lineno, _col_offset) + return For(arena, _target, _iter, _body, _orelse, _lineno, _col_offset) State.ast_type('For', 'stmt', ['target', 'iter', 'body', 'orelse']) class While(stmt): - def __init__(self, test, body, orelse, lineno, col_offset): + def __init__(self, arena, test, body, orelse, lineno, col_offset): self.test = test self.body = body self.orelse = orelse - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_While(self) @@ -833,31 +841,31 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_test = get_field(space, w_node, 'test', False) w_body = get_field(space, w_node, 'body', False) w_orelse = get_field(space, w_node, 'orelse', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _test = expr.from_object(space, w_test) + _test = expr.from_object(space, arena, w_test) body_w = space.unpackiterable(w_body) - _body = [stmt.from_object(space, w_item) for w_item in body_w] + _body = [stmt.from_object(space, arena, w_item) for w_item in body_w] orelse_w = space.unpackiterable(w_orelse) - _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _orelse = [stmt.from_object(space, arena, w_item) for w_item in orelse_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return While(_test, _body, _orelse, 
_lineno, _col_offset) + return While(arena, _test, _body, _orelse, _lineno, _col_offset) State.ast_type('While', 'stmt', ['test', 'body', 'orelse']) class If(stmt): - def __init__(self, test, body, orelse, lineno, col_offset): + def __init__(self, arena, test, body, orelse, lineno, col_offset): self.test = test self.body = body self.orelse = orelse - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_If(self) @@ -893,31 +901,31 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_test = get_field(space, w_node, 'test', False) w_body = get_field(space, w_node, 'body', False) w_orelse = get_field(space, w_node, 'orelse', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _test = expr.from_object(space, w_test) + _test = expr.from_object(space, arena, w_test) body_w = space.unpackiterable(w_body) - _body = [stmt.from_object(space, w_item) for w_item in body_w] + _body = [stmt.from_object(space, arena, w_item) for w_item in body_w] orelse_w = space.unpackiterable(w_orelse) - _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _orelse = [stmt.from_object(space, arena, w_item) for w_item in orelse_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return If(_test, _body, _orelse, _lineno, _col_offset) + return If(arena, _test, _body, _orelse, _lineno, _col_offset) State.ast_type('If', 'stmt', ['test', 'body', 'orelse']) class With(stmt): - def __init__(self, context_expr, optional_vars, body, lineno, col_offset): + def __init__(self, arena, context_expr, optional_vars, body, lineno, col_offset): self.context_expr = context_expr self.optional_vars = optional_vars self.body = body - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_With(self) 
@@ -949,30 +957,30 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_context_expr = get_field(space, w_node, 'context_expr', False) w_optional_vars = get_field(space, w_node, 'optional_vars', True) w_body = get_field(space, w_node, 'body', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _context_expr = expr.from_object(space, w_context_expr) - _optional_vars = expr.from_object(space, w_optional_vars) + _context_expr = expr.from_object(space, arena, w_context_expr) + _optional_vars = expr.from_object(space, arena, w_optional_vars) body_w = space.unpackiterable(w_body) - _body = [stmt.from_object(space, w_item) for w_item in body_w] + _body = [stmt.from_object(space, arena, w_item) for w_item in body_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return With(_context_expr, _optional_vars, _body, _lineno, _col_offset) + return With(arena, _context_expr, _optional_vars, _body, _lineno, _col_offset) State.ast_type('With', 'stmt', ['context_expr', 'optional_vars', 'body']) class Raise(stmt): - def __init__(self, type, inst, tback, lineno, col_offset): + def __init__(self, arena, type, inst, tback, lineno, col_offset): self.type = type self.inst = inst self.tback = tback - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Raise(self) @@ -1001,29 +1009,29 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_type = get_field(space, w_node, 'type', True) w_inst = get_field(space, w_node, 'inst', True) w_tback = get_field(space, w_node, 'tback', True) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _type = expr.from_object(space, w_type) - _inst = expr.from_object(space, w_inst) - _tback = expr.from_object(space, 
w_tback) + _type = expr.from_object(space, arena, w_type) + _inst = expr.from_object(space, arena, w_inst) + _tback = expr.from_object(space, arena, w_tback) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Raise(_type, _inst, _tback, _lineno, _col_offset) + return Raise(arena, _type, _inst, _tback, _lineno, _col_offset) State.ast_type('Raise', 'stmt', ['type', 'inst', 'tback']) class TryExcept(stmt): - def __init__(self, body, handlers, orelse, lineno, col_offset): + def __init__(self, arena, body, handlers, orelse, lineno, col_offset): self.body = body self.handlers = handlers self.orelse = orelse - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_TryExcept(self) @@ -1064,31 +1072,31 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_body = get_field(space, w_node, 'body', False) w_handlers = get_field(space, w_node, 'handlers', False) w_orelse = get_field(space, w_node, 'orelse', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) body_w = space.unpackiterable(w_body) - _body = [stmt.from_object(space, w_item) for w_item in body_w] + _body = [stmt.from_object(space, arena, w_item) for w_item in body_w] handlers_w = space.unpackiterable(w_handlers) - _handlers = [excepthandler.from_object(space, w_item) for w_item in handlers_w] + _handlers = [excepthandler.from_object(space, arena, w_item) for w_item in handlers_w] orelse_w = space.unpackiterable(w_orelse) - _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _orelse = [stmt.from_object(space, arena, w_item) for w_item in orelse_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return TryExcept(_body, _handlers, _orelse, _lineno, _col_offset) + return TryExcept(arena, _body, _handlers, _orelse, _lineno, _col_offset) 
State.ast_type('TryExcept', 'stmt', ['body', 'handlers', 'orelse']) class TryFinally(stmt): - def __init__(self, body, finalbody, lineno, col_offset): + def __init__(self, arena, body, finalbody, lineno, col_offset): self.body = body self.finalbody = finalbody - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_TryFinally(self) @@ -1121,28 +1129,28 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_body = get_field(space, w_node, 'body', False) w_finalbody = get_field(space, w_node, 'finalbody', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) body_w = space.unpackiterable(w_body) - _body = [stmt.from_object(space, w_item) for w_item in body_w] + _body = [stmt.from_object(space, arena, w_item) for w_item in body_w] finalbody_w = space.unpackiterable(w_finalbody) - _finalbody = [stmt.from_object(space, w_item) for w_item in finalbody_w] + _finalbody = [stmt.from_object(space, arena, w_item) for w_item in finalbody_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return TryFinally(_body, _finalbody, _lineno, _col_offset) + return TryFinally(arena, _body, _finalbody, _lineno, _col_offset) State.ast_type('TryFinally', 'stmt', ['body', 'finalbody']) class Assert(stmt): - def __init__(self, test, msg, lineno, col_offset): + def __init__(self, arena, test, msg, lineno, col_offset): self.test = test self.msg = msg - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Assert(self) @@ -1166,25 +1174,25 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_test = get_field(space, w_node, 'test', False) w_msg = get_field(space, w_node, 'msg', True) w_lineno = get_field(space, w_node, 'lineno', False) 
w_col_offset = get_field(space, w_node, 'col_offset', False) - _test = expr.from_object(space, w_test) - _msg = expr.from_object(space, w_msg) + _test = expr.from_object(space, arena, w_test) + _msg = expr.from_object(space, arena, w_msg) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Assert(_test, _msg, _lineno, _col_offset) + return Assert(arena, _test, _msg, _lineno, _col_offset) State.ast_type('Assert', 'stmt', ['test', 'msg']) class Import(stmt): - def __init__(self, names, lineno, col_offset): + def __init__(self, arena, names, lineno, col_offset): self.names = names - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Import(self) @@ -1209,26 +1217,26 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_names = get_field(space, w_node, 'names', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) names_w = space.unpackiterable(w_names) - _names = [alias.from_object(space, w_item) for w_item in names_w] + _names = [alias.from_object(space, arena, w_item) for w_item in names_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Import(_names, _lineno, _col_offset) + return Import(arena, _names, _lineno, _col_offset) State.ast_type('Import', 'stmt', ['names']) class ImportFrom(stmt): - def __init__(self, module, names, level, lineno, col_offset): + def __init__(self, arena, module, names, level, lineno, col_offset): self.module = module self.names = names self.level = level - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_ImportFrom(self) @@ -1257,7 +1265,7 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_module = get_field(space, w_node, 'module', True) 
w_names = get_field(space, w_node, 'names', False) w_level = get_field(space, w_node, 'level', True) @@ -1265,22 +1273,22 @@ w_col_offset = get_field(space, w_node, 'col_offset', False) _module = space.str_or_None_w(w_module) names_w = space.unpackiterable(w_names) - _names = [alias.from_object(space, w_item) for w_item in names_w] + _names = [alias.from_object(space, arena, w_item) for w_item in names_w] _level = space.int_w(w_level) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return ImportFrom(_module, _names, _level, _lineno, _col_offset) + return ImportFrom(arena, _module, _names, _level, _lineno, _col_offset) State.ast_type('ImportFrom', 'stmt', ['module', 'names', 'level']) class Exec(stmt): - def __init__(self, body, globals, locals, lineno, col_offset): + def __init__(self, arena, body, globals, locals, lineno, col_offset): self.body = body self.globals = globals self.locals = locals - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Exec(self) @@ -1308,27 +1316,27 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_body = get_field(space, w_node, 'body', False) w_globals = get_field(space, w_node, 'globals', True) w_locals = get_field(space, w_node, 'locals', True) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _body = expr.from_object(space, w_body) - _globals = expr.from_object(space, w_globals) - _locals = expr.from_object(space, w_locals) + _body = expr.from_object(space, arena, w_body) + _globals = expr.from_object(space, arena, w_globals) + _locals = expr.from_object(space, arena, w_locals) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Exec(_body, _globals, _locals, _lineno, _col_offset) + return Exec(arena, _body, _globals, _locals, _lineno, _col_offset) State.ast_type('Exec', 
'stmt', ['body', 'globals', 'locals']) class Global(stmt): - def __init__(self, names, lineno, col_offset): + def __init__(self, arena, names, lineno, col_offset): self.names = names - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Global(self) @@ -1351,7 +1359,7 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_names = get_field(space, w_node, 'names', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) @@ -1359,16 +1367,16 @@ _names = [space.realstr_w(w_item) for w_item in names_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Global(_names, _lineno, _col_offset) + return Global(arena, _names, _lineno, _col_offset) State.ast_type('Global', 'stmt', ['names']) class Expr(stmt): - def __init__(self, value, lineno, col_offset): + def __init__(self, arena, value, lineno, col_offset): self.value = value - stmt.__init__(self, lineno, col_offset) + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Expr(self) @@ -1388,22 +1396,22 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_value = get_field(space, w_node, 'value', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _value = expr.from_object(space, w_value) + _value = expr.from_object(space, arena, w_value) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Expr(_value, _lineno, _col_offset) + return Expr(arena, _value, _lineno, _col_offset) State.ast_type('Expr', 'stmt', ['value']) class Pass(stmt): - def __init__(self, lineno, col_offset): - stmt.__init__(self, lineno, col_offset) + def __init__(self, arena, lineno, col_offset): + stmt.__init__(self, arena, lineno, 
col_offset) def walkabout(self, visitor): visitor.visit_Pass(self) @@ -1420,20 +1428,20 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Pass(_lineno, _col_offset) + return Pass(arena, _lineno, _col_offset) State.ast_type('Pass', 'stmt', []) class Break(stmt): - def __init__(self, lineno, col_offset): - stmt.__init__(self, lineno, col_offset) + def __init__(self, arena, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Break(self) @@ -1450,20 +1458,20 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Break(_lineno, _col_offset) + return Break(arena, _lineno, _col_offset) State.ast_type('Break', 'stmt', []) class Continue(stmt): - def __init__(self, lineno, col_offset): - stmt.__init__(self, lineno, col_offset) + def __init__(self, arena, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Continue(self) @@ -1480,82 +1488,82 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Continue(_lineno, _col_offset) + return Continue(arena, _lineno, _col_offset) State.ast_type('Continue', 'stmt', []) class expr(AST): - def __init__(self, lineno, col_offset): + def __init__(self, arena, lineno, col_offset): 
self.lineno = lineno self.col_offset = col_offset @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): if space.is_w(w_node, space.w_None): return None if space.isinstance_w(w_node, get(space).w_BoolOp): - return BoolOp.from_object(space, w_node) + return BoolOp.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_BinOp): - return BinOp.from_object(space, w_node) + return BinOp.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_UnaryOp): - return UnaryOp.from_object(space, w_node) + return UnaryOp.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Lambda): - return Lambda.from_object(space, w_node) + return Lambda.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_IfExp): - return IfExp.from_object(space, w_node) + return IfExp.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Dict): - return Dict.from_object(space, w_node) + return Dict.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Set): - return Set.from_object(space, w_node) + return Set.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_ListComp): - return ListComp.from_object(space, w_node) + return ListComp.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_SetComp): - return SetComp.from_object(space, w_node) + return SetComp.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_DictComp): - return DictComp.from_object(space, w_node) + return DictComp.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_GeneratorExp): - return GeneratorExp.from_object(space, w_node) + return GeneratorExp.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Yield): - return Yield.from_object(space, w_node) + return Yield.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Compare): - 
return Compare.from_object(space, w_node) + return Compare.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Call): - return Call.from_object(space, w_node) + return Call.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Repr): - return Repr.from_object(space, w_node) + return Repr.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Num): - return Num.from_object(space, w_node) + return Num.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Str): - return Str.from_object(space, w_node) + return Str.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Attribute): - return Attribute.from_object(space, w_node) + return Attribute.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Subscript): - return Subscript.from_object(space, w_node) + return Subscript.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Name): - return Name.from_object(space, w_node) + return Name.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_List): - return List.from_object(space, w_node) + return List.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Tuple): - return Tuple.from_object(space, w_node) + return Tuple.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Const): - return Const.from_object(space, w_node) + return Const.from_object(space, arena, w_node) raise oefmt(space.w_TypeError, "Expected expr node, got %T", w_node) State.ast_type('expr', 'AST', None, ['lineno', 'col_offset']) class BoolOp(expr): - def __init__(self, op, values, lineno, col_offset): + def __init__(self, arena, op, values, lineno, col_offset): self.op = op self.values = values - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_BoolOp(self) @@ -1582,28 +1590,28 @@ return w_node 
@staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_op = get_field(space, w_node, 'op', False) w_values = get_field(space, w_node, 'values', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _op = boolop.from_object(space, w_op) + _op = boolop.from_object(space, arena, w_op) values_w = space.unpackiterable(w_values) - _values = [expr.from_object(space, w_item) for w_item in values_w] + _values = [expr.from_object(space, arena, w_item) for w_item in values_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return BoolOp(_op, _values, _lineno, _col_offset) + return BoolOp(arena, _op, _values, _lineno, _col_offset) State.ast_type('BoolOp', 'expr', ['op', 'values']) class BinOp(expr): - def __init__(self, left, op, right, lineno, col_offset): + def __init__(self, arena, left, op, right, lineno, col_offset): self.left = left self.op = op self.right = right - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_BinOp(self) @@ -1628,28 +1636,28 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_left = get_field(space, w_node, 'left', False) w_op = get_field(space, w_node, 'op', False) w_right = get_field(space, w_node, 'right', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _left = expr.from_object(space, w_left) - _op = operator.from_object(space, w_op) - _right = expr.from_object(space, w_right) + _left = expr.from_object(space, arena, w_left) + _op = operator.from_object(space, arena, w_op) + _right = expr.from_object(space, arena, w_right) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return BinOp(_left, _op, _right, _lineno, _col_offset) + return BinOp(arena, _left, _op, _right, _lineno, 
_col_offset) State.ast_type('BinOp', 'expr', ['left', 'op', 'right']) class UnaryOp(expr): - def __init__(self, op, operand, lineno, col_offset): + def __init__(self, arena, op, operand, lineno, col_offset): self.op = op self.operand = operand - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_UnaryOp(self) @@ -1671,26 +1679,26 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_op = get_field(space, w_node, 'op', False) w_operand = get_field(space, w_node, 'operand', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _op = unaryop.from_object(space, w_op) - _operand = expr.from_object(space, w_operand) + _op = unaryop.from_object(space, arena, w_op) + _operand = expr.from_object(space, arena, w_operand) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return UnaryOp(_op, _operand, _lineno, _col_offset) + return UnaryOp(arena, _op, _operand, _lineno, _col_offset) State.ast_type('UnaryOp', 'expr', ['op', 'operand']) class Lambda(expr): - def __init__(self, args, body, lineno, col_offset): + def __init__(self, arena, args, body, lineno, col_offset): self.args = args self.body = body - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Lambda(self) @@ -1713,27 +1721,27 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_args = get_field(space, w_node, 'args', False) w_body = get_field(space, w_node, 'body', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _args = arguments.from_object(space, w_args) - _body = expr.from_object(space, w_body) + _args = arguments.from_object(space, arena, w_args) + _body = expr.from_object(space, 
arena, w_body) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Lambda(_args, _body, _lineno, _col_offset) + return Lambda(arena, _args, _body, _lineno, _col_offset) State.ast_type('Lambda', 'expr', ['args', 'body']) class IfExp(expr): - def __init__(self, test, body, orelse, lineno, col_offset): + def __init__(self, arena, test, body, orelse, lineno, col_offset): self.test = test self.body = body self.orelse = orelse - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_IfExp(self) @@ -1759,28 +1767,28 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_test = get_field(space, w_node, 'test', False) w_body = get_field(space, w_node, 'body', False) w_orelse = get_field(space, w_node, 'orelse', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _test = expr.from_object(space, w_test) - _body = expr.from_object(space, w_body) - _orelse = expr.from_object(space, w_orelse) + _test = expr.from_object(space, arena, w_test) + _body = expr.from_object(space, arena, w_body) + _orelse = expr.from_object(space, arena, w_orelse) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return IfExp(_test, _body, _orelse, _lineno, _col_offset) + return IfExp(arena, _test, _body, _orelse, _lineno, _col_offset) State.ast_type('IfExp', 'expr', ['test', 'body', 'orelse']) class Dict(expr): - def __init__(self, keys, values, lineno, col_offset): + def __init__(self, arena, keys, values, lineno, col_offset): self.keys = keys self.values = values - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Dict(self) @@ -1813,27 +1821,27 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_keys = 
get_field(space, w_node, 'keys', False) w_values = get_field(space, w_node, 'values', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) keys_w = space.unpackiterable(w_keys) - _keys = [expr.from_object(space, w_item) for w_item in keys_w] + _keys = [expr.from_object(space, arena, w_item) for w_item in keys_w] values_w = space.unpackiterable(w_values) - _values = [expr.from_object(space, w_item) for w_item in values_w] + _values = [expr.from_object(space, arena, w_item) for w_item in values_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Dict(_keys, _values, _lineno, _col_offset) + return Dict(arena, _keys, _values, _lineno, _col_offset) State.ast_type('Dict', 'expr', ['keys', 'values']) class Set(expr): - def __init__(self, elts, lineno, col_offset): + def __init__(self, arena, elts, lineno, col_offset): self.elts = elts - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Set(self) @@ -1858,25 +1866,25 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_elts = get_field(space, w_node, 'elts', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) elts_w = space.unpackiterable(w_elts) - _elts = [expr.from_object(space, w_item) for w_item in elts_w] + _elts = [expr.from_object(space, arena, w_item) for w_item in elts_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Set(_elts, _lineno, _col_offset) + return Set(arena, _elts, _lineno, _col_offset) State.ast_type('Set', 'expr', ['elts']) class ListComp(expr): - def __init__(self, elt, generators, lineno, col_offset): + def __init__(self, arena, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators - expr.__init__(self, lineno, col_offset) + 
expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_ListComp(self) @@ -1904,27 +1912,27 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_elt = get_field(space, w_node, 'elt', False) w_generators = get_field(space, w_node, 'generators', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _elt = expr.from_object(space, w_elt) + _elt = expr.from_object(space, arena, w_elt) generators_w = space.unpackiterable(w_generators) - _generators = [comprehension.from_object(space, w_item) for w_item in generators_w] + _generators = [comprehension.from_object(space, arena, w_item) for w_item in generators_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return ListComp(_elt, _generators, _lineno, _col_offset) + return ListComp(arena, _elt, _generators, _lineno, _col_offset) State.ast_type('ListComp', 'expr', ['elt', 'generators']) class SetComp(expr): - def __init__(self, elt, generators, lineno, col_offset): + def __init__(self, arena, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_SetComp(self) @@ -1952,28 +1960,28 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_elt = get_field(space, w_node, 'elt', False) w_generators = get_field(space, w_node, 'generators', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _elt = expr.from_object(space, w_elt) + _elt = expr.from_object(space, arena, w_elt) generators_w = space.unpackiterable(w_generators) - _generators = [comprehension.from_object(space, w_item) for w_item in generators_w] + _generators = [comprehension.from_object(space, arena, 
w_item) for w_item in generators_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return SetComp(_elt, _generators, _lineno, _col_offset) + return SetComp(arena, _elt, _generators, _lineno, _col_offset) State.ast_type('SetComp', 'expr', ['elt', 'generators']) class DictComp(expr): - def __init__(self, key, value, generators, lineno, col_offset): + def __init__(self, arena, key, value, generators, lineno, col_offset): self.key = key self.value = value self.generators = generators - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_DictComp(self) @@ -2004,29 +2012,29 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_key = get_field(space, w_node, 'key', False) w_value = get_field(space, w_node, 'value', False) w_generators = get_field(space, w_node, 'generators', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _key = expr.from_object(space, w_key) - _value = expr.from_object(space, w_value) + _key = expr.from_object(space, arena, w_key) + _value = expr.from_object(space, arena, w_value) generators_w = space.unpackiterable(w_generators) - _generators = [comprehension.from_object(space, w_item) for w_item in generators_w] + _generators = [comprehension.from_object(space, arena, w_item) for w_item in generators_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return DictComp(_key, _value, _generators, _lineno, _col_offset) + return DictComp(arena, _key, _value, _generators, _lineno, _col_offset) State.ast_type('DictComp', 'expr', ['key', 'value', 'generators']) class GeneratorExp(expr): - def __init__(self, elt, generators, lineno, col_offset): + def __init__(self, arena, elt, generators, lineno, col_offset): self.elt = elt self.generators = generators - expr.__init__(self, lineno, col_offset) + 
expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_GeneratorExp(self) @@ -2054,26 +2062,26 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_elt = get_field(space, w_node, 'elt', False) w_generators = get_field(space, w_node, 'generators', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _elt = expr.from_object(space, w_elt) + _elt = expr.from_object(space, arena, w_elt) generators_w = space.unpackiterable(w_generators) - _generators = [comprehension.from_object(space, w_item) for w_item in generators_w] + _generators = [comprehension.from_object(space, arena, w_item) for w_item in generators_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return GeneratorExp(_elt, _generators, _lineno, _col_offset) + return GeneratorExp(arena, _elt, _generators, _lineno, _col_offset) State.ast_type('GeneratorExp', 'expr', ['elt', 'generators']) class Yield(expr): - def __init__(self, value, lineno, col_offset): + def __init__(self, arena, value, lineno, col_offset): self.value = value - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Yield(self) @@ -2094,25 +2102,25 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_value = get_field(space, w_node, 'value', True) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _value = expr.from_object(space, w_value) + _value = expr.from_object(space, arena, w_value) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Yield(_value, _lineno, _col_offset) + return Yield(arena, _value, _lineno, _col_offset) State.ast_type('Yield', 'expr', ['value']) class Compare(expr): - def __init__(self, left, ops, comparators, 
lineno, col_offset): + def __init__(self, arena, left, ops, comparators, lineno, col_offset): self.left = left self.ops = ops self.comparators = comparators - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Compare(self) @@ -2146,33 +2154,33 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_left = get_field(space, w_node, 'left', False) w_ops = get_field(space, w_node, 'ops', False) w_comparators = get_field(space, w_node, 'comparators', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _left = expr.from_object(space, w_left) + _left = expr.from_object(space, arena, w_left) ops_w = space.unpackiterable(w_ops) - _ops = [cmpop.from_object(space, w_item) for w_item in ops_w] + _ops = [cmpop.from_object(space, arena, w_item) for w_item in ops_w] comparators_w = space.unpackiterable(w_comparators) - _comparators = [expr.from_object(space, w_item) for w_item in comparators_w] + _comparators = [expr.from_object(space, arena, w_item) for w_item in comparators_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Compare(_left, _ops, _comparators, _lineno, _col_offset) + return Compare(arena, _left, _ops, _comparators, _lineno, _col_offset) State.ast_type('Compare', 'expr', ['left', 'ops', 'comparators']) class Call(expr): - def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset): + def __init__(self, arena, func, args, keywords, starargs, kwargs, lineno, col_offset): self.func = func self.args = args self.keywords = keywords self.starargs = starargs self.kwargs = kwargs - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Call(self) @@ -2216,7 +2224,7 @@ return w_node @staticmethod - def from_object(space, w_node): + def 
from_object(space, arena, w_node): w_func = get_field(space, w_node, 'func', False) w_args = get_field(space, w_node, 'args', False) w_keywords = get_field(space, w_node, 'keywords', False) @@ -2224,25 +2232,25 @@ w_kwargs = get_field(space, w_node, 'kwargs', True) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _func = expr.from_object(space, w_func) + _func = expr.from_object(space, arena, w_func) args_w = space.unpackiterable(w_args) - _args = [expr.from_object(space, w_item) for w_item in args_w] + _args = [expr.from_object(space, arena, w_item) for w_item in args_w] keywords_w = space.unpackiterable(w_keywords) - _keywords = [keyword.from_object(space, w_item) for w_item in keywords_w] - _starargs = expr.from_object(space, w_starargs) - _kwargs = expr.from_object(space, w_kwargs) + _keywords = [keyword.from_object(space, arena, w_item) for w_item in keywords_w] + _starargs = expr.from_object(space, arena, w_starargs) + _kwargs = expr.from_object(space, arena, w_kwargs) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Call(_func, _args, _keywords, _starargs, _kwargs, _lineno, _col_offset) + return Call(arena, _func, _args, _keywords, _starargs, _kwargs, _lineno, _col_offset) State.ast_type('Call', 'expr', ['func', 'args', 'keywords', 'starargs', 'kwargs']) class Repr(expr): - def __init__(self, value, lineno, col_offset): + def __init__(self, arena, value, lineno, col_offset): self.value = value - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Repr(self) @@ -2262,23 +2270,23 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_value = get_field(space, w_node, 'value', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _value = expr.from_object(space, 
w_value) + _value = expr.from_object(space, arena, w_value) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Repr(_value, _lineno, _col_offset) + return Repr(arena, _value, _lineno, _col_offset) State.ast_type('Repr', 'expr', ['value']) class Num(expr): - def __init__(self, n, lineno, col_offset): + def __init__(self, arena, n, lineno, col_offset): self.n = n - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Num(self) @@ -2297,23 +2305,23 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_n = get_field(space, w_node, 'n', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _n = w_n _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Num(_n, _lineno, _col_offset) + return Num(arena, _n, _lineno, _col_offset) State.ast_type('Num', 'expr', ['n']) class Str(expr): - def __init__(self, s, lineno, col_offset): + def __init__(self, arena, s, lineno, col_offset): self.s = s - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Str(self) @@ -2332,25 +2340,25 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_s = get_field(space, w_node, 's', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _s = check_string(space, w_s) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Str(_s, _lineno, _col_offset) + return Str(arena, _s, _lineno, _col_offset) State.ast_type('Str', 'expr', ['s']) class Attribute(expr): - def __init__(self, value, attr, ctx, lineno, col_offset): + def __init__(self, arena, value, attr, ctx, lineno, col_offset): self.value = value self.attr = attr self.ctx 
= ctx - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Attribute(self) @@ -2374,29 +2382,29 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_value = get_field(space, w_node, 'value', False) w_attr = get_field(space, w_node, 'attr', False) w_ctx = get_field(space, w_node, 'ctx', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _value = expr.from_object(space, w_value) + _value = expr.from_object(space, arena, w_value) _attr = space.realstr_w(w_attr) - _ctx = expr_context.from_object(space, w_ctx) + _ctx = expr_context.from_object(space, arena, w_ctx) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Attribute(_value, _attr, _ctx, _lineno, _col_offset) + return Attribute(arena, _value, _attr, _ctx, _lineno, _col_offset) State.ast_type('Attribute', 'expr', ['value', 'attr', 'ctx']) class Subscript(expr): - def __init__(self, value, slice, ctx, lineno, col_offset): + def __init__(self, arena, value, slice, ctx, lineno, col_offset): self.value = value self.slice = slice self.ctx = ctx - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Subscript(self) @@ -2421,28 +2429,28 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_value = get_field(space, w_node, 'value', False) w_slice = get_field(space, w_node, 'slice', False) w_ctx = get_field(space, w_node, 'ctx', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _value = expr.from_object(space, w_value) - _slice = slice.from_object(space, w_slice) - _ctx = expr_context.from_object(space, w_ctx) + _value = expr.from_object(space, arena, w_value) + _slice = 
slice.from_object(space, arena, w_slice) + _ctx = expr_context.from_object(space, arena, w_ctx) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Subscript(_value, _slice, _ctx, _lineno, _col_offset) + return Subscript(arena, _value, _slice, _ctx, _lineno, _col_offset) State.ast_type('Subscript', 'expr', ['value', 'slice', 'ctx']) class Name(expr): - def __init__(self, id, ctx, lineno, col_offset): + def __init__(self, arena, id, ctx, lineno, col_offset): self.id = id self.ctx = ctx - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Name(self) @@ -2463,26 +2471,26 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_id = get_field(space, w_node, 'id', False) w_ctx = get_field(space, w_node, 'ctx', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _id = space.realstr_w(w_id) - _ctx = expr_context.from_object(space, w_ctx) + _ctx = expr_context.from_object(space, arena, w_ctx) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Name(_id, _ctx, _lineno, _col_offset) + return Name(arena, _id, _ctx, _lineno, _col_offset) State.ast_type('Name', 'expr', ['id', 'ctx']) class List(expr): - def __init__(self, elts, ctx, lineno, col_offset): + def __init__(self, arena, elts, ctx, lineno, col_offset): self.elts = elts self.ctx = ctx - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_List(self) @@ -2509,27 +2517,27 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_elts = get_field(space, w_node, 'elts', False) w_ctx = get_field(space, w_node, 'ctx', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) 
elts_w = space.unpackiterable(w_elts) - _elts = [expr.from_object(space, w_item) for w_item in elts_w] - _ctx = expr_context.from_object(space, w_ctx) + _elts = [expr.from_object(space, arena, w_item) for w_item in elts_w] + _ctx = expr_context.from_object(space, arena, w_ctx) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return List(_elts, _ctx, _lineno, _col_offset) + return List(arena, _elts, _ctx, _lineno, _col_offset) State.ast_type('List', 'expr', ['elts', 'ctx']) class Tuple(expr): - def __init__(self, elts, ctx, lineno, col_offset): + def __init__(self, arena, elts, ctx, lineno, col_offset): self.elts = elts self.ctx = ctx - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Tuple(self) @@ -2556,26 +2564,26 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_elts = get_field(space, w_node, 'elts', False) w_ctx = get_field(space, w_node, 'ctx', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) elts_w = space.unpackiterable(w_elts) - _elts = [expr.from_object(space, w_item) for w_item in elts_w] - _ctx = expr_context.from_object(space, w_ctx) + _elts = [expr.from_object(space, arena, w_item) for w_item in elts_w] + _ctx = expr_context.from_object(space, arena, w_ctx) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Tuple(_elts, _ctx, _lineno, _col_offset) + return Tuple(arena, _elts, _ctx, _lineno, _col_offset) State.ast_type('Tuple', 'expr', ['elts', 'ctx']) class Const(expr): - def __init__(self, value, lineno, col_offset): + def __init__(self, arena, value, lineno, col_offset): self.value = value - expr.__init__(self, lineno, col_offset) + expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Const(self) @@ -2594,21 +2602,21 @@ return w_node @staticmethod 
- def from_object(space, w_node): + def from_object(space, arena, w_node): w_value = get_field(space, w_node, 'value', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _value = w_value _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Const(_value, _lineno, _col_offset) + return Const(arena, _value, _lineno, _col_offset) State.ast_type('Const', 'expr', ['value']) class expr_context(AST): @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): if space.isinstance_w(w_node, get(space).w_Load): return 1 if space.isinstance_w(w_node, get(space).w_Store): @@ -2672,18 +2680,22 @@ ] class slice(AST): + + def __init__(self, arena): + pass + @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): if space.is_w(w_node, space.w_None): return None if space.isinstance_w(w_node, get(space).w_Ellipsis): - return Ellipsis.from_object(space, w_node) + return Ellipsis.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Slice): - return Slice.from_object(space, w_node) + return Slice.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_ExtSlice): - return ExtSlice.from_object(space, w_node) + return ExtSlice.from_object(space, arena, w_node) if space.isinstance_w(w_node, get(space).w_Index): - return Index.from_object(space, w_node) + return Index.from_object(space, arena, w_node) raise oefmt(space.w_TypeError, "Expected slice node, got %T", w_node) State.ast_type('slice', 'AST', None, []) @@ -2702,18 +2714,19 @@ return w_node @staticmethod - def from_object(space, w_node): - return Ellipsis() + def from_object(space, arena, w_node): + return Ellipsis(arena) State.ast_type('Ellipsis', 'slice', []) class Slice(slice): - def __init__(self, lower, upper, step): + def __init__(self, arena, lower, upper, step): self.lower = lower self.upper = upper self.step = step + 
slice.__init__(self, arena) def walkabout(self, visitor): visitor.visit_Slice(self) @@ -2738,22 +2751,23 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_lower = get_field(space, w_node, 'lower', True) w_upper = get_field(space, w_node, 'upper', True) w_step = get_field(space, w_node, 'step', True) - _lower = expr.from_object(space, w_lower) - _upper = expr.from_object(space, w_upper) - _step = expr.from_object(space, w_step) - return Slice(_lower, _upper, _step) + _lower = expr.from_object(space, arena, w_lower) + _upper = expr.from_object(space, arena, w_upper) + _step = expr.from_object(space, arena, w_step) + return Slice(arena, _lower, _upper, _step) State.ast_type('Slice', 'slice', ['lower', 'upper', 'step']) class ExtSlice(slice): - def __init__(self, dims): + def __init__(self, arena, dims): self.dims = dims + slice.__init__(self, arena) def walkabout(self, visitor): visitor.visit_ExtSlice(self) @@ -2774,19 +2788,20 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_dims = get_field(space, w_node, 'dims', False) dims_w = space.unpackiterable(w_dims) - _dims = [slice.from_object(space, w_item) for w_item in dims_w] - return ExtSlice(_dims) + _dims = [slice.from_object(space, arena, w_item) for w_item in dims_w] + return ExtSlice(arena, _dims) State.ast_type('ExtSlice', 'slice', ['dims']) class Index(slice): - def __init__(self, value): + def __init__(self, arena, value): self.value = value + slice.__init__(self, arena) def walkabout(self, visitor): visitor.visit_Index(self) @@ -2802,17 +2817,17 @@ return w_node @staticmethod - def from_object(space, w_node): + def from_object(space, arena, w_node): w_value = get_field(space, w_node, 'value', False) - _value = expr.from_object(space, w_value) - return Index(_value) + _value = expr.from_object(space, arena, w_value) + return Index(arena, _value) State.ast_type('Index', 'slice', ['value']) From 
pypy.commits at gmail.com Wed Apr 13 14:43:27 2016 From: pypy.commits at gmail.com (amauryfa) Date: Wed, 13 Apr 2016 11:43:27 -0700 (PDT) Subject: [pypy-commit] pypy ast-arena: Some progress. The interpret() tests still don't pass. Message-ID: <570e934f.8a37c20a.12947.ffffb683@mx.google.com> Author: Amaury Forgeot d'Arc Branch: ast-arena Changeset: r83657:6b016173668b Date: 2016-04-13 20:36 +0200 http://bitbucket.org/pypy/pypy/changeset/6b016173668b/ Log: Some progress. The interpret() tests still don't pass. diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -33,6 +33,9 @@ class AST(object): __metaclass__ = extendabletype + def __init__(self, arena): + pass + def walkabout(self, visitor): raise AssertionError("walkabout() implementation not provided") @@ -160,9 +163,6 @@ class mod(AST): - def __init__(self, arena): - pass - @staticmethod def from_object(space, arena, w_node): if space.is_w(w_node, space.w_None): @@ -182,8 +182,8 @@ class Module(mod): def __init__(self, arena, body): + mod.__init__(self, arena) self.body = body - mod.__init__(self, arena) def walkabout(self, visitor): visitor.visit_Module(self) @@ -216,8 +216,8 @@ class Interactive(mod): def __init__(self, arena, body): + mod.__init__(self, arena) self.body = body - mod.__init__(self, arena) def walkabout(self, visitor): visitor.visit_Interactive(self) @@ -250,8 +250,8 @@ class Expression(mod): def __init__(self, arena, body): + mod.__init__(self, arena) self.body = body - mod.__init__(self, arena) def walkabout(self, visitor): visitor.visit_Expression(self) @@ -278,8 +278,8 @@ class Suite(mod): def __init__(self, arena, body): + mod.__init__(self, arena) self.body = body - mod.__init__(self, arena) def walkabout(self, visitor): visitor.visit_Suite(self) @@ -312,6 +312,7 @@ class stmt(AST): def __init__(self, arena, lineno, col_offset): + AST.__init__(self, arena) self.lineno = 
lineno self.col_offset = col_offset @@ -372,11 +373,11 @@ class FunctionDef(stmt): def __init__(self, arena, name, args, body, decorator_list, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.name = name self.args = args self.body = body self.decorator_list = decorator_list - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_FunctionDef(self) @@ -437,11 +438,11 @@ class ClassDef(stmt): def __init__(self, arena, name, bases, body, decorator_list, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.name = name self.bases = bases self.body = body self.decorator_list = decorator_list - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_ClassDef(self) @@ -508,8 +509,8 @@ class Return(stmt): def __init__(self, arena, value, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.value = value - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Return(self) @@ -545,8 +546,8 @@ class Delete(stmt): def __init__(self, arena, targets, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.targets = targets - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Delete(self) @@ -587,9 +588,9 @@ class Assign(stmt): def __init__(self, arena, targets, value, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.targets = targets self.value = value - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Assign(self) @@ -635,10 +636,10 @@ class AugAssign(stmt): def __init__(self, arena, target, op, value, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.target = target self.op = op self.value = value - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_AugAssign(self) @@ -682,10 +683,10 @@ class Print(stmt): def 
__init__(self, arena, dest, values, nl, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.dest = dest self.values = values self.nl = nl - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Print(self) @@ -736,11 +737,11 @@ class For(stmt): def __init__(self, arena, target, iter, body, orelse, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.target = target self.iter = iter self.body = body self.orelse = orelse - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_For(self) @@ -802,10 +803,10 @@ class While(stmt): def __init__(self, arena, test, body, orelse, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.test = test self.body = body self.orelse = orelse - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_While(self) @@ -862,10 +863,10 @@ class If(stmt): def __init__(self, arena, test, body, orelse, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.test = test self.body = body self.orelse = orelse - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_If(self) @@ -922,10 +923,10 @@ class With(stmt): def __init__(self, arena, context_expr, optional_vars, body, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.context_expr = context_expr self.optional_vars = optional_vars self.body = body - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_With(self) @@ -977,10 +978,10 @@ class Raise(stmt): def __init__(self, arena, type, inst, tback, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.type = type self.inst = inst self.tback = tback - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Raise(self) @@ -1028,10 +1029,10 @@ class TryExcept(stmt): def __init__(self, arena, body, handlers, 
orelse, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.body = body self.handlers = handlers self.orelse = orelse - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_TryExcept(self) @@ -1094,9 +1095,9 @@ class TryFinally(stmt): def __init__(self, arena, body, finalbody, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.body = body self.finalbody = finalbody - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_TryFinally(self) @@ -1148,9 +1149,9 @@ class Assert(stmt): def __init__(self, arena, test, msg, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.test = test self.msg = msg - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Assert(self) @@ -1191,8 +1192,8 @@ class Import(stmt): def __init__(self, arena, names, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.names = names - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Import(self) @@ -1233,10 +1234,10 @@ class ImportFrom(stmt): def __init__(self, arena, module, names, level, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.module = module self.names = names self.level = level - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_ImportFrom(self) @@ -1285,10 +1286,10 @@ class Exec(stmt): def __init__(self, arena, body, globals, locals, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.body = body self.globals = globals self.locals = locals - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Exec(self) @@ -1335,8 +1336,8 @@ class Global(stmt): def __init__(self, arena, names, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.names = names - stmt.__init__(self, arena, lineno, col_offset) def 
walkabout(self, visitor): visitor.visit_Global(self) @@ -1375,8 +1376,8 @@ class Expr(stmt): def __init__(self, arena, value, lineno, col_offset): + stmt.__init__(self, arena, lineno, col_offset) self.value = value - stmt.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Expr(self) @@ -1501,6 +1502,7 @@ class expr(AST): def __init__(self, arena, lineno, col_offset): + AST.__init__(self, arena) self.lineno = lineno self.col_offset = col_offset @@ -1561,9 +1563,9 @@ class BoolOp(expr): def __init__(self, arena, op, values, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.op = op self.values = values - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_BoolOp(self) @@ -1608,10 +1610,10 @@ class BinOp(expr): def __init__(self, arena, left, op, right, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.left = left self.op = op self.right = right - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_BinOp(self) @@ -1655,9 +1657,9 @@ class UnaryOp(expr): def __init__(self, arena, op, operand, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.op = op self.operand = operand - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_UnaryOp(self) @@ -1696,9 +1698,9 @@ class Lambda(expr): def __init__(self, arena, args, body, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.args = args self.body = body - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Lambda(self) @@ -1738,10 +1740,10 @@ class IfExp(expr): def __init__(self, arena, test, body, orelse, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.test = test self.body = body self.orelse = orelse - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_IfExp(self) @@ -1786,9 
+1788,9 @@ class Dict(expr): def __init__(self, arena, keys, values, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.keys = keys self.values = values - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Dict(self) @@ -1840,8 +1842,8 @@ class Set(expr): def __init__(self, arena, elts, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.elts = elts - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Set(self) @@ -1882,9 +1884,9 @@ class ListComp(expr): def __init__(self, arena, elt, generators, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.elt = elt self.generators = generators - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_ListComp(self) @@ -1930,9 +1932,9 @@ class SetComp(expr): def __init__(self, arena, elt, generators, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.elt = elt self.generators = generators - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_SetComp(self) @@ -1978,10 +1980,10 @@ class DictComp(expr): def __init__(self, arena, key, value, generators, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.key = key self.value = value self.generators = generators - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_DictComp(self) @@ -2032,9 +2034,9 @@ class GeneratorExp(expr): def __init__(self, arena, elt, generators, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.elt = elt self.generators = generators - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_GeneratorExp(self) @@ -2080,8 +2082,8 @@ class Yield(expr): def __init__(self, arena, value, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.value = value - expr.__init__(self, 
arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Yield(self) @@ -2117,10 +2119,10 @@ class Compare(expr): def __init__(self, arena, left, ops, comparators, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.left = left self.ops = ops self.comparators = comparators - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Compare(self) @@ -2175,12 +2177,12 @@ class Call(expr): def __init__(self, arena, func, args, keywords, starargs, kwargs, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.func = func self.args = args self.keywords = keywords self.starargs = starargs self.kwargs = kwargs - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Call(self) @@ -2249,8 +2251,8 @@ class Repr(expr): def __init__(self, arena, value, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.value = value - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Repr(self) @@ -2285,8 +2287,8 @@ class Num(expr): def __init__(self, arena, n, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.n = n - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Num(self) @@ -2320,8 +2322,8 @@ class Str(expr): def __init__(self, arena, s, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.s = s - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Str(self) @@ -2355,10 +2357,10 @@ class Attribute(expr): def __init__(self, arena, value, attr, ctx, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.value = value self.attr = attr self.ctx = ctx - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Attribute(self) @@ -2401,10 +2403,10 @@ class Subscript(expr): def __init__(self, arena, value, slice, ctx, lineno, 
col_offset): + expr.__init__(self, arena, lineno, col_offset) self.value = value self.slice = slice self.ctx = ctx - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Subscript(self) @@ -2448,9 +2450,9 @@ class Name(expr): def __init__(self, arena, id, ctx, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.id = id self.ctx = ctx - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Name(self) @@ -2488,9 +2490,9 @@ class List(expr): def __init__(self, arena, elts, ctx, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.elts = elts self.ctx = ctx - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_List(self) @@ -2535,9 +2537,9 @@ class Tuple(expr): def __init__(self, arena, elts, ctx, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.elts = elts self.ctx = ctx - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Tuple(self) @@ -2582,8 +2584,8 @@ class Const(expr): def __init__(self, arena, value, lineno, col_offset): + expr.__init__(self, arena, lineno, col_offset) self.value = value - expr.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_Const(self) @@ -2681,9 +2683,6 @@ class slice(AST): - def __init__(self, arena): - pass - @staticmethod def from_object(space, arena, w_node): if space.is_w(w_node, space.w_None): @@ -2723,10 +2722,10 @@ class Slice(slice): def __init__(self, arena, lower, upper, step): + slice.__init__(self, arena) self.lower = lower self.upper = upper self.step = step - slice.__init__(self, arena) def walkabout(self, visitor): visitor.visit_Slice(self) @@ -2766,8 +2765,8 @@ class ExtSlice(slice): def __init__(self, arena, dims): + slice.__init__(self, arena) self.dims = dims - slice.__init__(self, arena) def walkabout(self, visitor): visitor.visit_ExtSlice(self) @@ -2800,8 +2799,8 
@@ class Index(slice): def __init__(self, arena, value): + slice.__init__(self, arena) self.value = value - slice.__init__(self, arena) def walkabout(self, visitor): visitor.visit_Index(self) @@ -3124,10 +3123,10 @@ class comprehension(AST): def __init__(self, arena, target, iter, ifs): + AST.__init__(self, arena) self.target = target self.iter = iter self.ifs = ifs - None.__init__(self, arena) def mutate_over(self, visitor): self.target = self.target.mutate_over(visitor) @@ -3169,6 +3168,7 @@ class excepthandler(AST): def __init__(self, arena, lineno, col_offset): + AST.__init__(self, arena) self.lineno = lineno self.col_offset = col_offset @@ -3185,10 +3185,10 @@ class ExceptHandler(excepthandler): def __init__(self, arena, type, name, body, lineno, col_offset): + excepthandler.__init__(self, arena, lineno, col_offset) self.type = type self.name = name self.body = body - excepthandler.__init__(self, arena, lineno, col_offset) def walkabout(self, visitor): visitor.visit_ExceptHandler(self) @@ -3241,11 +3241,11 @@ class arguments(AST): def __init__(self, arena, args, vararg, kwarg, defaults): + AST.__init__(self, arena) self.args = args self.vararg = vararg self.kwarg = kwarg self.defaults = defaults - None.__init__(self, arena) def mutate_over(self, visitor): if self.args: @@ -3296,9 +3296,9 @@ class keyword(AST): def __init__(self, arena, arg, value): + AST.__init__(self, arena) self.arg = arg self.value = value - None.__init__(self, arena) def mutate_over(self, visitor): self.value = self.value.mutate_over(visitor) @@ -3328,9 +3328,9 @@ class alias(AST): def __init__(self, arena, name, asname): + AST.__init__(self, arena) self.name = name self.asname = asname - None.__init__(self, arena) def mutate_over(self, visitor): return visitor.visit_alias(self) diff --git a/pypy/interpreter/astcompiler/astarena.py b/pypy/interpreter/astcompiler/astarena.py --- a/pypy/interpreter/astcompiler/astarena.py +++ b/pypy/interpreter/astcompiler/astarena.py @@ -1,2 +1,141 @@ +from 
rpython.rtyper import rmodel, rclass, rbuiltin +from rpython.rtyper.extregistry import ExtRegistryEntry +from rpython.rtyper.lltypesystem import lltype, rffi, llmemory +from rpython.flowspace.model import Constant +from rpython.annotator import model as annmodel +from pypy.interpreter.astcompiler import ast +from rpython.rlib.rawstorage import alloc_raw_storage +from rpython.rtyper.annlowlevel import (cast_instance_to_gcref, + cast_gcref_to_instance) + +# This is the most important line! +ast.AST._alloc_flavor_ = 'raw' + class Arena(object): - pass + def __init__(self): + self.memory_blocks = [] + self.objects = [] + + def allocate(self, cls): + xxx + +def _all_subclasses(cls): + yield cls + for subclass in cls.__subclasses__(): + for c in _all_subclasses(subclass): + yield c + + +class SomeAstInstance(annmodel.SomeInstance): + def rtyper_makerepr(self, rtyper): + return _getinstancerepr(rtyper, self.classdef) + + +class SomeArena(annmodel.SomeInstance): + def rtyper_makerepr(self, rtyper): + return ArenaRepr() + +ARENA = lltype.GcStruct('Arena', + ('storage', llmemory.Address), + ('size', lltype.Signed), + ('current', lltype.Signed), + ) + +class ArenaRepr(rmodel.Repr): + lowleveltype = lltype.Ptr(ARENA) + + def rtyper_new(self, hop): + hop.exception_cannot_occur() + return hop.gendirectcall(self.ll_new) + + @staticmethod + def ll_new(): + ll_arena = lltype.malloc(ARENA) + SIZE = 1000 * 1000 + ll_arena.storage = llmemory.cast_ptr_to_adr(alloc_raw_storage( + SIZE, track_allocation=False, zero=False)) + ll_arena.size = SIZE + ll_arena.current = 0 + return ll_arena + + @staticmethod + def ll_allocate(ll_arena, TYPE): + size = 100 # XXX rffi.sizeof(TYPE.TO) + offset = ll_arena.current + assert offset + size < ll_arena.size + ll_arena.current += size + return rffi.cast(TYPE, ll_arena.storage + offset) + +class AstNodeRepr(rclass.InstanceRepr): + def alloc_instance(self, llops, classcallhop=None, nonmovable=False): + if classcallhop is None: + raise TyperError("must 
instantiate %r by calling the class" % ( + self.classdef,)) + hop = classcallhop + r_arena = hop.args_r[0] + v_arena = hop.inputarg(r_arena, 0) + v_size = hop.inputconst(lltype.Signed, + rffi.sizeof(self.lowleveltype)) + cTYPE = hop.inputconst(lltype.Void, self.lowleveltype) + v_ptr = hop.llops.gendirectcall(r_arena.ll_allocate, v_arena, cTYPE) + # return rbuiltin.gen_cast(llops, self.lowleveltype, v_ptr) + return v_ptr + + +def _getinstancerepr(rtyper, classdef): + # Almost a copy of rclass.getinstancerepr() + if classdef.basedef: + _getinstancerepr(rtyper, classdef.basedef) + flavor = rmodel.getgcflavor(classdef) + try: + result = rtyper.instance_reprs[classdef, flavor] + except KeyError: + result = AstNodeRepr(rtyper, classdef, gcflavor=flavor) + + rtyper.instance_reprs[classdef, flavor] = result + rtyper.add_pendingsetup(result) + return result + + +class ArenaEntry(ExtRegistryEntry): + _about_ = Arena + + def compute_result_annotation(self): + return SomeArena(self.bookkeeper.getuniqueclassdef(Arena)) + + def specialize_call(self, hop): + return hop.r_result.rtyper_new(hop) + + +class AstEntry(ExtRegistryEntry): + _about_ = tuple(_all_subclasses(ast.AST)) + + def compute_result_annotation(self, *args): + from rpython.annotator.argument import ArgumentsForTranslation + classdef = self.bookkeeper.getuniqueclassdef(self.instance) + s_init = classdef.classdesc.s_read_attribute('__init__') + s_instance = SomeAstInstance(classdef) + self.bookkeeper.emulate_pbc_call(classdef, s_init, + [s_instance] + list(args)) + return s_instance + + def specialize_call(self, hop): + from rpython.rtyper.rmodel import inputconst + from rpython.rtyper.lltypesystem.lltype import Void, Ptr + hop.exception_is_here() + s_instance = hop.s_result + object_type = hop.r_result.object_type + classdef = s_instance.classdef + rinstance = _getinstancerepr(hop.rtyper, classdef) + v_instance = rinstance.new_instance(hop.llops, hop) + # Call __init__ + s_init = 
classdef.classdesc.s_read_attribute('__init__') + v_init = Constant("init-func-dummy") # this value not really used + hop2 = hop.copy() + hop2.v_s_insertfirstarg(v_instance, s_instance) # add 'instance' + hop2.v_s_insertfirstarg(v_init, s_init) # add 'initfunc' + hop2.s_result = annmodel.s_None + hop2.r_result = hop.rtyper.getrepr(hop2.s_result) + hop2.dispatch() + return v_instance + diff --git a/pypy/interpreter/astcompiler/test/test_arena.py b/pypy/interpreter/astcompiler/test/test_arena.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/astcompiler/test/test_arena.py @@ -0,0 +1,35 @@ +from rpython.translator.c.test.test_genc import compile +from rpython.rtyper.test.test_llinterp import interpret +from pypy.interpreter.astcompiler import ast +from pypy.interpreter.astcompiler import astarena +from rpython.rtyper.test.tool import BaseRtypingTest + + +class TestArena(BaseRtypingTest): + def test_empty_module(self): + def run(): + arena = astarena.Arena() + node = ast.Module(arena, [ + ast.Name(arena, 'x', ast.Load, 1, 1)]) + return len(node.body) + assert run() == 1 + assert interpret(run, []) == 1 + fn = compile(run, []) + assert fn() == 1 + + def test_compile(self): + from pypy.interpreter.pyparser import pyparse + from pypy.interpreter.astcompiler import astbuilder + from pypy.objspace.fake.objspace import FakeObjSpace + space = FakeObjSpace() + def run(expr): + p = pyparse.PythonParser(space) + info = pyparse.CompileInfo("", 'exec') + arena = astarena.Arena() + cst = p.parse_source(expr, info) + ast = astbuilder.ast_from_node(space, arena, cst, info) + run("x=2") + # res = interpret(run, [self.string_to_ll("x=2")]) + fn = compile(run, [str]) + fn("x=2") + diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -84,15 +84,14 @@ else: self.emit("class %s(AST):" % (base,)) self.emit("") - args = 
"".join(", " + attr.name.value - for attr in sum.attributes) - self.emit("def __init__(self, arena%s):" % (args,), 1) if sum.attributes: + args = "".join(", " + attr.name.value + for attr in sum.attributes) + self.emit("def __init__(self, arena%s):" % (args,), 1) + self.emit("AST.__init__(self, arena)", 2) for attr in sum.attributes: self.visit(attr) - else: - self.emit("pass", 2) - self.emit("") + self.emit("") self.emit("@staticmethod", 1) self.emit("def from_object(space, arena, w_node):", 1) self.emit("if space.is_w(w_node, space.w_None):", 2) @@ -215,11 +214,11 @@ args = "arena" + "".join(", %s" % field.name for field in arg_fields) self.emit("def __init__(self, %s):" % args, 1) + base_args = "arena" + "".join(", %s" % field.name + for field in (extras or ())) + self.emit("%s.__init__(self, %s)" % (base or "AST", base_args), 2) for field in fields: self.visit(field) - base_args = "arena" + "".join(", %s" % field.name - for field in (extras or ())) - self.emit("%s.__init__(self, %s)" % (base, base_args), 2) def make_mutate_over(self, cons, name): self.emit("def mutate_over(self, visitor):", 1) @@ -420,6 +419,9 @@ class AST(object): __metaclass__ = extendabletype + def __init__(self, arena): + pass + def walkabout(self, visitor): raise AssertionError("walkabout() implementation not provided") diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -413,7 +413,12 @@ def get(self, name): name + "xx" # check that it's a string return w_some_obj() -FakeObjSpace.sys = FakeModule() + +class FakeSysModule(FakeModule): + def get_w_default_encoder(self): + return w_some_obj() + +FakeObjSpace.sys = FakeSysModule() FakeObjSpace.sys.filesystemencoding = 'foobar' FakeObjSpace.sys.defaultencoding = 'ascii' FakeObjSpace.builtin = FakeModule() diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -565,7 
+565,7 @@ clsname = 'object' else: clsname = self.classdef.name - return '' % (clsname,) + return '<%s for %s>' % (self.__class__.__name__, clsname,) def compact_repr(self): if self.classdef is None: @@ -698,8 +698,7 @@ rbase = rbase.rbase return False - def new_instance(self, llops, classcallhop=None, nonmovable=False): - """Build a new instance, without calling __init__.""" + def alloc_instance(self, llops, classcallhop=None, nonmovable=False): flavor = self.gcflavor flags = {'flavor': flavor} if nonmovable: @@ -709,6 +708,11 @@ vlist = [ctype, cflags] vptr = llops.genop('malloc', vlist, resulttype=Ptr(self.object_type)) + return vptr + + def new_instance(self, llops, classcallhop=None, nonmovable=False): + """Build a new instance, without calling __init__.""" + vptr = self.alloc_instance(llops, classcallhop, nonmovable=nonmovable) ctypeptr = inputconst(CLASSTYPE, self.rclass.getvtable()) self.setfield(vptr, '__class__', ctypeptr, llops) # initialize instance attributes from their defaults from the class From pypy.commits at gmail.com Wed Apr 13 16:29:04 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 13 Apr 2016 13:29:04 -0700 (PDT) Subject: [pypy-commit] pypy default: fix ppc for jit-constptr-2 Message-ID: <570eac10.647ac20a.c9b4c.00e9@mx.google.com> Author: Armin Rigo Branch: Changeset: r83658:911126fe2131 Date: 2016-04-13 22:28 +0200 http://bitbucket.org/pypy/pypy/changeset/911126fe2131/ Log: fix ppc for jit-constptr-2 diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -994,18 +994,6 @@ else: clt.invalidate_positions.append((guard_pos, relative_offset)) - def get_asmmemmgr_blocks(self, looptoken): - clt = looptoken.compiled_loop_token - if clt.asmmemmgr_blocks is None: - clt.asmmemmgr_blocks = [] - return clt.asmmemmgr_blocks - - def get_asmmemmgr_gcreftracers(self, looptoken): - clt = looptoken.compiled_loop_token - if 
clt.asmmemmgr_gcreftracers is None: - clt.asmmemmgr_gcreftracers = [] - return clt.asmmemmgr_gcreftracers - def _walk_operations(self, inputargs, operations, regalloc): fcond = c.AL self._regalloc = regalloc diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -161,6 +161,18 @@ assert self._allgcrefs_faildescr_next < len(self._allgcrefs) return self._allgcrefs_faildescr_next + def get_asmmemmgr_blocks(self, looptoken): + clt = looptoken.compiled_loop_token + if clt.asmmemmgr_blocks is None: + clt.asmmemmgr_blocks = [] + return clt.asmmemmgr_blocks + + def get_asmmemmgr_gcreftracers(self, looptoken): + clt = looptoken.compiled_loop_token + if clt.asmmemmgr_gcreftracers is None: + clt.asmmemmgr_gcreftracers = [] + return clt.asmmemmgr_gcreftracers + def set_debug(self, v): r = self._debug self._debug = v diff --git a/rpython/jit/backend/ppc/codebuilder.py b/rpython/jit/backend/ppc/codebuilder.py --- a/rpython/jit/backend/ppc/codebuilder.py +++ b/rpython/jit/backend/ppc/codebuilder.py @@ -936,9 +936,9 @@ class PPCGuardToken(GuardToken): def __init__(self, cpu, gcmap, descr, failargs, faillocs, - guard_opnum, frame_depth, fcond=c.cond_none): + guard_opnum, frame_depth, faildescrindex, fcond=c.cond_none): GuardToken.__init__(self, cpu, gcmap, descr, failargs, faillocs, - guard_opnum, frame_depth) + guard_opnum, frame_depth, faildescrindex) self.fcond = fcond @@ -1012,13 +1012,15 @@ self.load_imm(dest_reg, word) return diff - def load_from_addr(self, rD, addr): - assert rD is not r.r0 - diff = self.load_imm_plus(rD, addr) + def load_from_addr(self, rD, rT, addr): + # load [addr] into rD. rT is a temporary register which can be + # equal to rD, but can't be r0. 
+ assert rT is not r.r0 + diff = self.load_imm_plus(rT, addr) if IS_PPC_32: - self.lwz(rD.value, rD.value, diff) + self.lwz(rD.value, rT.value, diff) else: - self.ld(rD.value, rD.value, diff) + self.ld(rD.value, rT.value, diff) def b_offset(self, target): curpos = self.currpos() @@ -1279,7 +1281,7 @@ oplist = [None] * (rop._LAST + 1) for key, val in rop.__dict__.items(): - if key.startswith("_"): + if key.startswith("_") or not isinstance(val, int): continue opname = key.lower() methname = "emit_%s" % opname diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -291,8 +291,10 @@ def build_guard_token(self, op, frame_depth, arglocs, fcond): descr = op.getdescr() gcmap = allocate_gcmap(self, frame_depth, r.JITFRAME_FIXED_SIZE) + faildescrindex = self.get_gcref_from_faildescr(descr) token = PPCGuardToken(self.cpu, gcmap, descr, op.getfailargs(), arglocs, op.getopnum(), frame_depth, + faildescrindex, fcond) return token @@ -474,19 +476,19 @@ def emit_finish(self, op, arglocs, regalloc): base_ofs = self.cpu.get_baseofs_of_frame_field() - if len(arglocs) > 1: - [return_val, fail_descr_loc] = arglocs + if len(arglocs) > 0: + [return_val] = arglocs if op.getarg(0).type == FLOAT: self.mc.stfd(return_val.value, r.SPP.value, base_ofs) else: self.mc.std(return_val.value, r.SPP.value, base_ofs) - else: - [fail_descr_loc] = arglocs ofs = self.cpu.get_ofs_of_frame_field('jf_descr') ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') - self.mc.load_imm(r.r5, fail_descr_loc.getint()) + descr = op.getdescr() + faildescrindex = self.get_gcref_from_faildescr(descr) + self._load_from_gc_table(r.r5, r.r5, faildescrindex) # gcmap logic here: arglist = op.getarglist() @@ -541,7 +543,7 @@ emit_cast_int_to_ptr = _genop_same_as def emit_guard_no_exception(self, op, arglocs, regalloc): - self.mc.load_from_addr(r.SCRATCH2, self.cpu.pos_exception()) + 
self.mc.load_from_addr(r.SCRATCH2, r.SCRATCH2, self.cpu.pos_exception()) self.mc.cmp_op(0, r.SCRATCH2.value, 0, imm=True) self.guard_success_cc = c.EQ self._emit_guard(op, arglocs) @@ -586,6 +588,17 @@ mc.store(r.SCRATCH.value, r.SCRATCH2.value, 0) mc.store(r.SCRATCH.value, r.SCRATCH2.value, diff) + def _load_from_gc_table(self, rD, rT, index): + # rT is a temporary, may be equal to rD, must be != r0 + addr = self.gc_table_addr + index * WORD + self.mc.load_from_addr(rD, rT, addr) + + def emit_load_from_gc_table(self, op, arglocs, regalloc): + index = op.getarg(0).getint() + [resloc] = arglocs + assert resloc.is_reg() + self._load_from_gc_table(resloc, resloc, index) + class CallOpAssembler(object): @@ -646,9 +659,9 @@ guard_op.getopnum() == rop.GUARD_NOT_FORCED_2) faildescr = guard_op.getdescr() ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - self.mc.load_imm(r.SCRATCH, rffi.cast(lltype.Signed, - cast_instance_to_gcref(faildescr))) - self.mc.store(r.SCRATCH.value, r.SPP.value, ofs) + faildescrindex = self.get_gcref_from_faildescr(faildescr) + self._load_from_gc_table(r.r2, r.r2, faildescrindex) + self.mc.store(r.r2.value, r.SPP.value, ofs) def _find_nearby_operation(self, regalloc, delta): return regalloc.operations[regalloc.rm.position + delta] diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -752,7 +752,6 @@ frame_info = self.datablockwrapper.malloc_aligned( jitframe.JITFRAMEINFO_SIZE, alignment=WORD) clt.frame_info = rffi.cast(jitframe.JITFRAMEINFOPTR, frame_info) - clt.allgcrefs = [] clt.frame_info.clear() # for now if log: @@ -762,8 +761,10 @@ regalloc = Regalloc(assembler=self) # self._call_header_with_stack_check() + allgcrefs = [] operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) + looptoken, allgcrefs) + self.reserve_gcref_table(allgcrefs) looppos = 
self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) @@ -786,6 +787,7 @@ r_uint(rawstart + size_excluding_failure_stuff), r_uint(rawstart))) debug_stop("jit-backend-addr") + self.patch_gcref_table(looptoken, rawstart) self.patch_pending_failure_recoveries(rawstart) # ops_offset = self.mc.ops_offset @@ -840,11 +842,14 @@ arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = Regalloc(assembler=self) - startpos = self.mc.get_relative_pos() + allgcrefs = [] operations = regalloc.prepare_bridge(inputargs, arglocs, operations, - self.current_clt.allgcrefs, + allgcrefs, self.current_clt.frame_info) + self.reserve_gcref_table(allgcrefs) + startpos = self.mc.get_relative_pos() + self._check_frame_depth(self.mc, regalloc.get_gcmap()) frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() @@ -854,6 +859,7 @@ self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) rawstart = self.materialize_loop(original_loop_token) debug_bridge(descr_number, rawstart, codeendpos) + self.patch_gcref_table(original_loop_token, rawstart) self.patch_pending_failure_recoveries(rawstart) # patch the jump from original guard self.patch_jump_for_descr(faildescr, rawstart) @@ -868,6 +874,22 @@ self.teardown() return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) + def reserve_gcref_table(self, allgcrefs): + # allocate the gc table right now. We write absolute loads in + # each load_from_gc_table instruction for now. XXX improve, + # but it's messy. 
+ self.gc_table_addr = self.datablockwrapper.malloc_aligned( + len(allgcrefs) * WORD, alignment=WORD) + self.setup_gcrefs_list(allgcrefs) + + def patch_gcref_table(self, looptoken, rawstart): + rawstart = self.gc_table_addr + tracer = self.cpu.gc_ll_descr.make_gcref_tracer(rawstart, + self._allgcrefs) + gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) + gcreftracers.append(tracer) # keepalive + self.teardown_gcrefs_list() + def teardown(self): self.pending_guard_tokens = None self.mc = None @@ -921,12 +943,12 @@ def generate_quick_failure(self, guardtok): startpos = self.mc.currpos() - fail_descr, target = self.store_info_on_descr(startpos, guardtok) + faildescrindex, target = self.store_info_on_descr(startpos, guardtok) assert target != 0 - self.load_gcmap(self.mc, r.r2, gcmap=guardtok.gcmap) - self.mc.load_imm(r.r0, target) - self.mc.mtctr(r.r0.value) - self.mc.load_imm(r.r0, fail_descr) + self.mc.load_imm(r.r2, target) + self.mc.mtctr(r.r2.value) + self._load_from_gc_table(r.r0, r.r2, faildescrindex) + self.load_gcmap(self.mc, r.r2, gcmap=guardtok.gcmap) # preserves r0 self.mc.bctr() # we need to write at least 6 insns here, for patch_jump_for_descr() while self.mc.currpos() < startpos + 6 * 4: diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -523,18 +523,17 @@ return [loc1, res] def prepare_finish(self, op): - descr = op.getdescr() - fail_descr = cast_instance_to_gcref(descr) - # we know it does not move, but well - rgc._make_sure_does_not_move(fail_descr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) if op.numargs() > 0: loc = self.ensure_reg(op.getarg(0)) - locs = [loc, imm(fail_descr)] + locs = [loc] else: - locs = [imm(fail_descr)] + locs = [] return locs + def prepare_load_from_gc_table(self, op): + res = self.rm.force_allocate_reg(op) + return [res] + def prepare_call_malloc_gc(self, op): return self._prepare_call(op) 
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -838,18 +838,6 @@ mc.writeimm32(allocated_depth) mc.copy_to_raw_memory(adr) - def get_asmmemmgr_blocks(self, looptoken): - clt = looptoken.compiled_loop_token - if clt.asmmemmgr_blocks is None: - clt.asmmemmgr_blocks = [] - return clt.asmmemmgr_blocks - - def get_asmmemmgr_gcreftracers(self, looptoken): - clt = looptoken.compiled_loop_token - if clt.asmmemmgr_gcreftracers is None: - clt.asmmemmgr_gcreftracers = [] - return clt.asmmemmgr_gcreftracers - def materialize_loop(self, looptoken): self.datablockwrapper.done() # finish using cpu.asmmemmgr self.datablockwrapper = None From pypy.commits at gmail.com Wed Apr 13 21:17:42 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 13 Apr 2016 18:17:42 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Factor out the pattern for calling an rposix function on a Path object Message-ID: <570eefb6.2976c20a.11f84.104c@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83659:d67878ce4e73 Date: 2016-04-14 01:34 +0100 http://bitbucket.org/pypy/pypy/changeset/d67878ce4e73/ Log: Factor out the pattern for calling an rposix function on a Path object diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -112,6 +112,17 @@ return func(fname1, fname2, *args) return dispatch + at specialize.arg(0) +def call_rposix(func, path, *args): + """Call a function that takes a filesystem path as its first argument""" + if path.as_unicode is not None: + return func(path.as_unicode, *args) + else: + path_b = path.as_bytes + assert path_b is not None + return func(path.as_bytes, *args) + + class Path(object): _immutable_fields_ = ['as_fd', 'as_bytes', 'as_unicode'] @@ -1414,13 +1425,7 @@ "utime: 'ns' unsupported on this platform on 
PyPy") if utime_now: try: - if path.as_unicode is not None: - rposix.utime(path.as_unicode, None) - else: - path_b = path.as_bytes - assert path_b is not None - rposix.utime(path.as_bytes, None) - return + call_rposix(rposix.utime, path, None) except OSError as e: raise wrap_oserror(space, e) try: @@ -1435,12 +1440,7 @@ raise raise OperationError(space.w_TypeError, space.wrap(msg)) try: - if path.as_unicode is not None: - rposix.utime(path.as_unicode, (actime, modtime)) - else: - path_b = path.as_bytes - assert path_b is not None - rposix.utime(path_b, (actime, modtime)) + call_rposix(rposix.utime, path, (actime, modtime)) except OSError as e: raise wrap_oserror(space, e) From pypy.commits at gmail.com Wed Apr 13 21:17:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 13 Apr 2016 18:17:44 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Simplify stat(), fix error messages, add missing lstat() functionality Message-ID: <570eefb8.070d1c0a.e33e5.5e88@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83660:3612d36ef882 Date: 2016-04-14 02:16 +0100 http://bitbucket.org/pypy/pypy/changeset/3612d36ef882/ Log: Simplify stat(), fix error messages, add missing lstat() functionality diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -124,43 +124,44 @@ class Path(object): - _immutable_fields_ = ['as_fd', 'as_bytes', 'as_unicode'] + _immutable_fields_ = ['as_fd', 'as_bytes', 'as_unicode', 'w_path'] - def __init__(self, fd, bytes, unicode): + def __init__(self, fd, bytes, unicode, w_path): self.as_fd = fd self.as_bytes = bytes self.as_unicode = unicode + self.w_path = w_path class _PathOrFd(Unwrapper): def unwrap(self, space, w_value): if _WIN32: try: path_u = space.unicode_w(w_value) - return Path(-1, None, path_u) + return Path(-1, None, path_u, w_value) except OperationError: pass try: path_b = space.fsencode_w(w_value) - 
return Path(-1, path_b, None) + return Path(-1, path_b, None, w_value) except OperationError: pass if not space.isinstance_w(w_value, space.w_int): raise oefmt(space.w_TypeError, "argument should be string, bytes or integer, not %T", w_value) fd = unwrap_fd(space, w_value) - return Path(fd, None, None) + return Path(fd, None, None, w_value) class _JustPath(Unwrapper): def unwrap(self, space, w_value): if _WIN32: try: path_u = space.unicode_w(w_value) - return Path(-1, None, path_u) + return Path(-1, None, path_u, w_value) except OperationError: pass try: path_b = space.fsencode_w(w_value) - return Path(-1, path_b, None) + return Path(-1, path_b, None, w_value) except OperationError: raise oefmt(space.w_TypeError, "illegal type for path parameter") @@ -409,8 +410,11 @@ else: return build_stat_result(space, st) - at unwrap_spec(dir_fd=DirFD(rposix.HAVE_FSTATAT), follow_symlinks=kwonly(bool)) -def stat(space, w_path, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): + at unwrap_spec( + path=path_or_fd(allow_fd=True), + dir_fd=DirFD(rposix.HAVE_FSTATAT), + follow_symlinks=kwonly(bool)) +def stat(space, path, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """stat(path, *, dir_fd=None, follow_symlinks=True) -> stat result Perform a stat system call on the given path. @@ -426,42 +430,43 @@ link points to. 
It is an error to use dir_fd or follow_symlinks when specifying path as an open file descriptor.""" - if follow_symlinks and dir_fd == DEFAULT_DIR_FD: - try: - st = dispatch_filename(rposix_stat.stat, 0, - allow_fd_fn=rposix_stat.fstat)(space, w_path) - except OSError as e: - raise wrap_oserror2(space, e, w_path) + return do_stat(space, "stat", path, dir_fd, follow_symlinks) + + at specialize.arg(1) +def do_stat(space, funcname, path, dir_fd, follow_symlinks): + """Common implementation for stat() and lstat()""" + try: + if path.as_fd != -1: + if dir_fd != DEFAULT_DIR_FD: + raise oefmt(space.w_ValueError, + "%s: can't specify both dir_fd and fd", funcname) + if not follow_symlinks: + raise oefmt(space.w_ValueError, + "%s: cannot use fd and follow_symlinks together", funcname) + st = rposix_stat.fstat(path.as_fd) + elif follow_symlinks and dir_fd == DEFAULT_DIR_FD: + st = call_rposix(rposix_stat.stat, path) + elif not follow_symlinks and dir_fd == DEFAULT_DIR_FD: + st = call_rposix(rposix_stat.lstat, path) + elif rposix.HAVE_FSTATAT: + st = call_rposix(rposix_stat.fstatat, path, dir_fd, follow_symlinks) else: - return build_stat_result(space, st) - - if not follow_symlinks and dir_fd == DEFAULT_DIR_FD: - return lstat(space, w_path) - - if rposix.HAVE_FSTATAT: - try: - path = space.fsencode_w(w_path) - st = rposix_stat.fstatat(path, dir_fd, follow_symlinks) - except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise oefmt(space.w_NotImplementedError, + "%s: unsupported argument combination", funcname) + except OSError as e: + raise wrap_oserror2(space, e, path.w_path) + else: return build_stat_result(space, st) - raise oefmt(space.w_NotImplementedError, - "stat: unsupported argument combination") - - at unwrap_spec(dir_fd=DirFD(available=False)) -def lstat(space, w_path, dir_fd=DEFAULT_DIR_FD): + at unwrap_spec( + path=path_or_fd(allow_fd=False), + dir_fd=DirFD(rposix.HAVE_FSTATAT)) +def lstat(space, path, dir_fd=DEFAULT_DIR_FD): """lstat(path, *, 
dir_fd=None) -> stat result Like stat(), but do not follow symbolic links. Equivalent to stat(path, follow_symlinks=False).""" - - try: - st = dispatch_filename(rposix_stat.lstat)(space, w_path) - except OSError, e: - raise wrap_oserror2(space, e, w_path) - else: - return build_stat_result(space, st) + return do_stat(space, "lstat", path, dir_fd, False) class StatState(object): def __init__(self, space): From pypy.commits at gmail.com Thu Apr 14 02:37:59 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 13 Apr 2016 23:37:59 -0700 (PDT) Subject: [pypy-commit] pypy default: Print more info, to end up in the captured stdout. For the armhf test failure, Message-ID: <570f3ac7.a151c20a.21520.567e@mx.google.com> Author: Armin Rigo Branch: Changeset: r83661:dcccc77da828 Date: 2016-04-14 07:54 +0200 http://bitbucket.org/pypy/pypy/changeset/dcccc77da828/ Log: Print more info, to end up in the captured stdout. For the armhf test failure, which fails on buildbot but passes when run directly diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4501,19 +4501,28 @@ def checkops(mc, ops_regexp): import re - words = [line.split("\t")[2].split()[0] + ';' for line in mc] + words = [] + print '----- checkops -----' + for line in mc: + print line.rstrip() + t = line.split("\t") + if len(t) <= 2: + continue + w = t[2].split() + if len(w) == 0: + continue + words.append(w[0] + ';') + print '[[%s]]' % (w[0],) text = ' '.join(words) assert re.compile(ops_regexp).match(text) data = ctypes.string_at(info.asmaddr, info.asmlen) try: mc = list(machine_code_dump(data, info.asmaddr, cpuname)) - lines = [line for line in mc if line.count('\t') >= 2] - checkops(lines, self.add_loop_instructions) + checkops(mc, self.add_loop_instructions) data = ctypes.string_at(bridge_info.asmaddr, bridge_info.asmlen) mc = list(machine_code_dump(data, bridge_info.asmaddr, 
cpuname)) - lines = [line for line in mc if line.count('\t') >= 2] - checkops(lines, self.bridge_loop_instructions) + checkops(mc, self.bridge_loop_instructions) except ObjdumpNotFound: py.test.skip("requires (g)objdump") From pypy.commits at gmail.com Thu Apr 14 02:38:01 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 13 Apr 2016 23:38:01 -0700 (PDT) Subject: [pypy-commit] pypy default: Windows fix: unicode chars are size 2. Message-ID: <570f3ac9.2413c30a.340a3.2b2f@mx.google.com> Author: Armin Rigo Branch: Changeset: r83662:ab3f2b26b098 Date: 2016-04-14 08:19 +0200 http://bitbucket.org/pypy/pypy/changeset/ab3f2b26b098/ Log: Windows fix: unicode chars are size 2. Also, clean up after running the test. diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -1217,12 +1217,12 @@ '%(unicodelendescr.field_size)s)'], ## getitem str/unicode - [True, (4,), 'i3 = unicodegetitem(p0,i1)' '->' + [True, (2,4), 'i3 = unicodegetitem(p0,i1)' '->' 'i3 = gc_load_indexed_i(p0,i1,' '%(unicodedescr.itemsize)d,' '%(unicodedescr.basesize)d,' '%(unicodedescr.itemsize)d)'], - #[False, (4,), 'i3 = unicodegetitem(p0,i1)' '->' + #[False, (2,4), 'i3 = unicodegetitem(p0,i1)' '->' # 'i4 = int_mul(i1, %(unicodedescr.itemsize)d);' # 'i5 = int_add(i4, %(unicodedescr.basesize)d);' # 'i3 = gc_load_i(p0,i5,%(unicodedescr.itemsize)d)'], @@ -1236,7 +1236,7 @@ [True, (4,), 'i3 = strsetitem(p0,i1,0)' '->' 'i3 = gc_store_indexed(p0,i1,0,1,' '%(strdescr.basesize)d,1)'], - [True, (4,), 'i3 = unicodesetitem(p0,i1,0)' '->' + [True, (2,4), 'i3 = unicodesetitem(p0,i1,0)' '->' 'i3 = gc_store_indexed(p0,i1,0,' '%(unicodedescr.itemsize)d,' '%(unicodedescr.basesize)d,' @@ -1277,19 +1277,22 @@ if not factors: all_supported_sizes = [(1,), (1,2,), (4,), (1,2,4,8)] - for factors in all_supported_sizes: - self.cpu.load_supported_factors 
= factors - f, t = fromto.split('->') - t = ('\n' +(' '*16)).join([s for s in t.split(';')]) - self.check_rewrite(""" - [p0,i1,i2] - {f} - jump() - """.format(**locals()), """ - [p0,i1,i2] - {t} - jump() - """.format(**locals())) + try: + for factors in all_supported_sizes: + self.cpu.load_supported_factors = factors + f, t = fromto.split('->') + t = ('\n' +(' '*20)).join([s for s in t.split(';')]) + self.check_rewrite(""" + [p0,i1,i2] + {f} + jump() + """.format(**locals()), """ + [p0,i1,i2] + {t} + jump() + """.format(**locals())) + finally: + del self.cpu.load_supported_factors # restore class-level value def test_load_from_gc_table_1i(self): self.check_rewrite(""" From pypy.commits at gmail.com Thu Apr 14 03:17:50 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Apr 2016 00:17:50 -0700 (PDT) Subject: [pypy-commit] pypy default: Makes the test run, at least Message-ID: <570f441e.4412c30a.ba3b8.590d@mx.google.com> Author: Armin Rigo Branch: Changeset: r83663:3d731562ee06 Date: 2016-04-14 09:17 +0200 http://bitbucket.org/pypy/pypy/changeset/3d731562ee06/ Log: Makes the test run, at least diff --git a/pypy/module/_multiprocessing/test/test_win32.py b/pypy/module/_multiprocessing/test/test_win32.py --- a/pypy/module/_multiprocessing/test/test_win32.py +++ b/pypy/module/_multiprocessing/test/test_win32.py @@ -2,7 +2,8 @@ import sys class AppTestWin32: - spaceconfig = dict(usemodules=('_multiprocessing',)) + spaceconfig = dict(usemodules=('_multiprocessing', + 'signal', '_rawffi', 'binascii')) def setup_class(cls): if sys.platform != "win32": From pypy.commits at gmail.com Thu Apr 14 03:35:19 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Apr 2016 00:35:19 -0700 (PDT) Subject: [pypy-commit] pypy default: Improve test_seek, and try to make it not fail on windows. 
Message-ID: <570f4837.96811c0a.ab146.ffffb71b@mx.google.com> Author: Armin Rigo Branch: Changeset: r83664:57d144629ace Date: 2016-04-14 09:34 +0200 http://bitbucket.org/pypy/pypy/changeset/57d144629ace/ Log: Improve test_seek, and try to make it not fail on windows. Failed on that: now it just silently ends py.test. No clue. diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -267,19 +267,28 @@ self.interpret(f, []) def test_seek(self): + from sys import platform fname = str(self.tmpdir.join('file_4')) def f(): f = open(fname, "w+") - f.write("xxx") + f.write("abcdef") f.seek(0) - assert f.read() == "xxx" + assert f.read() == "abcdef" + f.seek(1) + assert f.read() == "bcdef" + f.seek(2) + f.seek(-2, 2) + assert f.read() == "ef" + f.seek(2) + f.seek(-1, 1) + assert f.read() == "bcdef" try: f.seek(0, 42) except IOError as e: assert e.errno == errno.EINVAL else: - assert False + assert platform == 'win32' f.close() f() From pypy.commits at gmail.com Thu Apr 14 03:40:59 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Apr 2016 00:40:59 -0700 (PDT) Subject: [pypy-commit] pypy default: "fix" Message-ID: <570f498b.52ad1c0a.332f1.73a4@mx.google.com> Author: Armin Rigo Branch: Changeset: r83665:c957e8268d73 Date: 2016-04-14 09:39 +0200 http://bitbucket.org/pypy/pypy/changeset/c957e8268d73/ Log: "fix" diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -267,7 +267,6 @@ self.interpret(f, []) def test_seek(self): - from sys import platform fname = str(self.tmpdir.join('file_4')) def f(): @@ -283,12 +282,14 @@ f.seek(2) f.seek(-1, 1) assert f.read() == "bcdef" - try: - f.seek(0, 42) - except IOError as e: - assert e.errno == errno.EINVAL - else: - assert platform == 'win32' + #---is the following behavior interesting in RPython? 
+ #---I claim not, and it doesn't work on Windows + #try: + # f.seek(0, 42) + #except IOError as e: + # assert e.errno == errno.EINVAL + #else: + # assert False f.close() f() From pypy.commits at gmail.com Thu Apr 14 04:42:41 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Apr 2016 01:42:41 -0700 (PDT) Subject: [pypy-commit] pypy default: Rename argument according to docstring Message-ID: <570f5801.4412c30a.ba3b8.79ba@mx.google.com> Author: Armin Rigo Branch: Changeset: r83666:a2cec1d99cc5 Date: 2016-04-14 10:42 +0200 http://bitbucket.org/pypy/pypy/changeset/a2cec1d99cc5/ Log: Rename argument according to docstring diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -15,9 +15,9 @@ # ____________________________________________________________ -def sorted(lst, cmp=None, key=None, reverse=False): +def sorted(iterable, cmp=None, key=None, reverse=False): "sorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list" - sorted_lst = list(lst) + sorted_lst = list(iterable) sorted_lst.sort(cmp, key, reverse) return sorted_lst From pypy.commits at gmail.com Thu Apr 14 06:26:51 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Apr 2016 03:26:51 -0700 (PDT) Subject: [pypy-commit] pypy default: oops. probable fix. will try to make a unit test Message-ID: <570f706b.28ddc20a.e35d1.ffffb54a@mx.google.com> Author: Armin Rigo Branch: Changeset: r83667:d072c237452a Date: 2016-04-14 12:26 +0200 http://bitbucket.org/pypy/pypy/changeset/d072c237452a/ Log: oops. probable fix. will try to make a unit test diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -783,12 +783,15 @@ # get_max_size_of_gen_load_int() instructions. No point # in optimizing in case we get less. 
Just in case though, # we check and pad with nops. - extra_bytes = mc.get_max_size_of_gen_load_int() * 2 + extra_bytes = mc.get_max_size_of_gen_load_int() * 4 offset -= extra_bytes start = mc.get_relative_pos() mc.gen_load_int(regnum, offset) - while mc.get_relative_pos() != start + extra_bytes: + missing = start + extra_bytes - mc.get_relative_pos() + while missing > 0: mc.NOP() + missing = start + extra_bytes - mc.get_relative_pos() + assert missing == 0 mc.LDR_rr(regnum, r.pc.value, regnum) def new_stack_loc(self, i, tp): From pypy.commits at gmail.com Thu Apr 14 06:40:41 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Apr 2016 03:40:41 -0700 (PDT) Subject: [pypy-commit] pypy default: Direct test for load_from_gc_table. Message-ID: <570f73a9.46941c0a.999fd.fffff5a0@mx.google.com> Author: Armin Rigo Branch: Changeset: r83668:000e7c099390 Date: 2016-04-14 12:40 +0200 http://bitbucket.org/pypy/pypy/changeset/000e7c099390/ Log: Direct test for load_from_gc_table. diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -5263,3 +5263,36 @@ fail = self.cpu.get_latest_descr(deadframe) res = self.cpu.get_int_value(deadframe, 0) assert res == 0 + + def test_load_from_gc_table_many(self): + # Test that 'load_from_gc_table' handles a table of NUM entries. + # Done by writing NUM setfield_gc on constants. Each one + # requires a load_from_gc_table. The value of NUM is choosen + # so that not all of them fit into the ARM's 4096-bytes offset. 
+ NUM = 1025 + S = lltype.GcStruct('S', ('x', lltype.Signed)) + fielddescr = self.cpu.fielddescrof(S, 'x') + table = [lltype.malloc(S) for i in range(NUM)] + looptoken = JitCellToken() + targettoken = TargetToken() + ops = [ + '[]', + ] + namespace = {'fielddescr': fielddescr, + 'finaldescr': BasicFinalDescr(5)} + for i, s in enumerate(table): + ops.append('setfield_gc(ConstPtr(ptr%d), %d, descr=fielddescr)' + % (i, i)) + namespace['ptr%d' % i] = lltype.cast_opaque_ptr(llmemory.GCREF, s) + ops.append('finish(descr=finaldescr)') + + loop = parse('\n'.join(ops), namespace=namespace) + + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + deadframe = self.cpu.execute_token(looptoken) + fail = self.cpu.get_latest_descr(deadframe) + assert fail.identifier == 5 + + # check that all setfield_gc() worked + for i, s in enumerate(table): + assert s.x == i From pypy.commits at gmail.com Thu Apr 14 06:57:32 2016 From: pypy.commits at gmail.com (catalin_m) Date: Thu, 14 Apr 2016 03:57:32 -0700 (PDT) Subject: [pypy-commit] pypy detect_cpu_count: (catalinm) detect_number_of_processors now uses multiprocessing.cpu_count as default behavior, the initial implementation is now just a fallback. Modified test_cpuinfo_linux to properly check its functionality. Modified the cpuinfo string to check regex for more than 10 cpus. Message-ID: <570f779c.c11a1c0a.379f4.0ff6@mx.google.com> Author: Catalin Gabriel Manciu Branch: detect_cpu_count Changeset: r83669:91e8f98d883a Date: 2016-04-14 13:47 +0300 http://bitbucket.org/pypy/pypy/changeset/91e8f98d883a/ Log: (catalinm) detect_number_of_processors now uses multiprocessing.cpu_count as default behavior, the initial implementation is now just a fallback. Modified test_cpuinfo_linux to properly check its functionality. Modified the cpuinfo string to check regex for more than 10 cpus. 
diff --git a/rpython/config/support.py b/rpython/config/support.py --- a/rpython/config/support.py +++ b/rpython/config/support.py @@ -4,18 +4,12 @@ import re, sys, os, subprocess -def detect_number_of_processors(filename_or_file='/proc/cpuinfo'): - if os.environ.get('MAKEFLAGS'): - return 1 # don't override MAKEFLAGS. This will call 'make' without any '-j' option +def detect_number_of_processors_fallback(filename_or_file): if sys.platform == 'darwin': return sysctl_get_cpu_count('/usr/sbin/sysctl') elif sys.platform.startswith('freebsd'): return sysctl_get_cpu_count('/sbin/sysctl') elif not sys.platform.startswith('linux'): - try: - import multiprocessing - return multiprocessing.cpu_count() - except: return 1 # try to use cpu_count on other platforms or fallback to 1 try: if isinstance(filename_or_file, str): @@ -28,6 +22,15 @@ except: return 1 # we really don't want to explode here, at worst we have 1 +def detect_number_of_processors(filename_or_file='/proc/cpuinfo'): + if os.environ.get('MAKEFLAGS'): + return 1 # don't override MAKEFLAGS. 
This will call 'make' without any '-j' option + try: + import multiprocessing + return multiprocessing.cpu_count() + except: + return detect_number_of_processors_fallback(filename_or_file) + def sysctl_get_cpu_count(cmd, name='hw.ncpu'): try: proc = subprocess.Popen([cmd, '-n', name], stdout=subprocess.PIPE) diff --git a/rpython/config/test/test_support.py b/rpython/config/test/test_support.py --- a/rpython/config/test/test_support.py +++ b/rpython/config/test/test_support.py @@ -30,6 +30,13 @@ cache size\t: 4096 KB physical id\t: 0 siblings\t: 4 + +processor\t: 10 +vendor_id\t: GenuineIntel +cpu family\t: 6 +model\t\t: 37 +model name\t: Intel(R) Core(TM) i7 CPU L 620 @ 2.00GHz +stepping\t: 2 """ class FakeEnviron: @@ -43,14 +50,35 @@ if not sys.platform.startswith('linux'): py.test.skip("linux only") saved = os.environ + # old_cpu_count will be multiprocessing.cpu_count if multiprocessing module is available or None if the import fails try: + import multiprocessing + old_cpu_count = multiprocessing.cpu_count + except: + old_cpu_count = None + if old_cpu_count != None: # if multiprocessing module is available + # test common behavior + assert support.detect_number_of_processors() == multiprocessing.cpu_count() + # test common behaviour when MAKEFLAGS is set + os.environ = FakeEnviron('-j2') + assert support.detect_number_of_processors() == 1 + # create an override for cpu_count that throws an exception in order to test the fallback behavior of + # support.detect_number_of_processors() + def fail_cpu_count(): + raise Exception("Failure") + multiprocessing.cpu_count = fail_cpu_count + try: + # test fallback behavior (multiprocessing.cpu_count() throwing an exception or multiprocessing module + # not available) os.environ = FakeEnviron(None) - assert support.detect_number_of_processors(StringIO(cpuinfo)) == 4 + assert support.detect_number_of_processors(StringIO(cpuinfo)) == 11 assert support.detect_number_of_processors('random crap that does not exist') == 1 
os.environ = FakeEnviron('-j2') assert support.detect_number_of_processors(StringIO(cpuinfo)) == 1 finally: os.environ = saved + if old_cpu_count != None: + multiprocessing.cpu_count = old_cpu_count def test_cpuinfo_sysctl(): if sys.platform != 'darwin' and not sys.platform.startswith('freebsd'): From pypy.commits at gmail.com Thu Apr 14 09:01:14 2016 From: pypy.commits at gmail.com (catalin_m) Date: Thu, 14 Apr 2016 06:01:14 -0700 (PDT) Subject: [pypy-commit] pypy detect_cpu_count: (catalin_m) Renamed support.detect_number_of_processors to support.detect_number_of_usable_processors to (somewhat) avoid confusion. Put back the usable cpu counting logic (cpu_count / 2 for >= 4 cpus with a minimum of 3, cpu_count otherwise). Now applies to BSD and MACOS as well. Moved the logic to support.usable_processors_from_total_processors(total) and added tests for it. Changed tests to reflect changes in the support module Message-ID: <570f949a.a12dc20a.361e2.ffffc85f@mx.google.com> Author: Catalin Gabriel Manciu Branch: detect_cpu_count Changeset: r83670:2f0bd0017db0 Date: 2016-04-14 15:52 +0300 http://bitbucket.org/pypy/pypy/changeset/2f0bd0017db0/ Log: (catalin_m) Renamed support.detect_number_of_processors to support.detect_number_of_usable_processors to (somewhat) avoid confusion. Put back the usable cpu counting logic (cpu_count / 2 for >= 4 cpus with a minimum of 3, cpu_count otherwise). Now applies to BSD and MACOS as well. Moved the logic to support.usable_processors_from_total_processors(total) and added tests for it. 
Changed tests to reflect changes in the support module diff --git a/rpython/config/support.py b/rpython/config/support.py --- a/rpython/config/support.py +++ b/rpython/config/support.py @@ -4,32 +4,40 @@ import re, sys, os, subprocess -def detect_number_of_processors_fallback(filename_or_file): - if sys.platform == 'darwin': - return sysctl_get_cpu_count('/usr/sbin/sysctl') - elif sys.platform.startswith('freebsd'): - return sysctl_get_cpu_count('/sbin/sysctl') - elif not sys.platform.startswith('linux'): - return 1 # try to use cpu_count on other platforms or fallback to 1 - try: - if isinstance(filename_or_file, str): - f = open(filename_or_file, "r") - else: - f = filename_or_file - return max([int(re.split('processor.*?(\d+)', line)[1]) - for line in f.readlines() - if line.startswith('processor')]) + 1 # returning the actual number of available CPUs - except: - return 1 # we really don't want to explode here, at worst we have 1 +def usable_processors_from_total_processors(total): + if total >= 4: + return max(total // 2, 3) + return total -def detect_number_of_processors(filename_or_file='/proc/cpuinfo'): +def detect_number_of_usable_processors(filename_or_file='/proc/cpuinfo'): if os.environ.get('MAKEFLAGS'): return 1 # don't override MAKEFLAGS. 
This will call 'make' without any '-j' option + + def fallback(filename_or_file): + if sys.platform == 'darwin': + return sysctl_get_cpu_count('/usr/sbin/sysctl') + elif sys.platform.startswith('freebsd'): + return sysctl_get_cpu_count('/sbin/sysctl') + elif not sys.platform.startswith('linux'): + return 1 # try to use cpu_count on other platforms or fallback to 1 + try: + if isinstance(filename_or_file, str): + f = open(filename_or_file, "r") + else: + f = filename_or_file + return max([int(re.split('processor.*?(\d+)', line)[1]) + for line in f.readlines() + if line.startswith('processor')]) + 1 # returning the actual number of available CPUs + except: + return 1 # we really don't want to explode here, at worst we have 1 + try: import multiprocessing - return multiprocessing.cpu_count() + count = multiprocessing.cpu_count() except: - return detect_number_of_processors_fallback(filename_or_file) + count = fallback(filename_or_file) + return usable_processors_from_total_processors(count) + def sysctl_get_cpu_count(cmd, name='hw.ncpu'): try: diff --git a/rpython/config/test/test_support.py b/rpython/config/test/test_support.py --- a/rpython/config/test/test_support.py +++ b/rpython/config/test/test_support.py @@ -46,9 +46,18 @@ assert varname == 'MAKEFLAGS' return self._value +def test_usable_cpus_from_total_cpus(): + # test usable_processors_from_total_processors function + assert support.usable_processors_from_total_processors(1) == 1 + assert support.usable_processors_from_total_processors(3) == 3 + assert support.usable_processors_from_total_processors(4) == 3 + assert support.usable_processors_from_total_processors(8) == 4 + assert support.usable_processors_from_total_processors(16) == 8 + def test_cpuinfo_linux(): if not sys.platform.startswith('linux'): py.test.skip("linux only") + saved = os.environ # old_cpu_count will be multiprocessing.cpu_count if multiprocessing module is available or None if the import fails try: @@ -56,14 +65,15 @@ old_cpu_count = 
multiprocessing.cpu_count except: old_cpu_count = None + if old_cpu_count != None: # if multiprocessing module is available # test common behavior - assert support.detect_number_of_processors() == multiprocessing.cpu_count() + assert support.detect_number_of_usable_processors() == support.usable_processors_from_total_processors(multiprocessing.cpu_count()) # test common behaviour when MAKEFLAGS is set os.environ = FakeEnviron('-j2') - assert support.detect_number_of_processors() == 1 + assert support.detect_number_of_usable_processors() == 1 # create an override for cpu_count that throws an exception in order to test the fallback behavior of - # support.detect_number_of_processors() + # support.detect_number_of_usable_processors() def fail_cpu_count(): raise Exception("Failure") multiprocessing.cpu_count = fail_cpu_count @@ -71,10 +81,10 @@ # test fallback behavior (multiprocessing.cpu_count() throwing an exception or multiprocessing module # not available) os.environ = FakeEnviron(None) - assert support.detect_number_of_processors(StringIO(cpuinfo)) == 11 - assert support.detect_number_of_processors('random crap that does not exist') == 1 + assert support.detect_number_of_usable_processors(StringIO(cpuinfo)) == support.usable_processors_from_total_processors(11) + assert support.detect_number_of_usable_processors('random crap that does not exist') == 1 os.environ = FakeEnviron('-j2') - assert support.detect_number_of_processors(StringIO(cpuinfo)) == 1 + assert support.detect_number_of_usable_processors(StringIO(cpuinfo)) == 1 finally: os.environ = saved if old_cpu_count != None: @@ -94,9 +104,9 @@ try: support.sysctl_get_cpu_count = count os.environ = FakeEnviron(None) - assert support.detect_number_of_processors() == 42 + assert support.detect_number_of_usable_processors() == support.usable_processors_from_total_processors(42) os.environ = FakeEnviron('-j2') - assert support.detect_number_of_processors() == 1 + assert support.detect_number_of_usable_processors() 
== 1 finally: os.environ = saved support.sysctl_get_cpu_count = saved_func diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -3,7 +3,7 @@ from rpython.config.config import OptionDescription, BoolOption, IntOption, ArbitraryOption, FloatOption from rpython.config.config import ChoiceOption, StrOption, Config, ConflictConfigError from rpython.config.config import ConfigError -from rpython.config.support import detect_number_of_processors +from rpython.config.support import detect_number_of_usable_processors from rpython.translator.platform import platform as compiler @@ -172,7 +172,7 @@ negation=False), IntOption("make_jobs", "Specify -j argument to make for compilation" " (C backend only)", - cmdline="--make-jobs", default=detect_number_of_processors()), + cmdline="--make-jobs", default=detect_number_of_usable_processors()), # Flags of the TranslationContext: BoolOption("list_comprehension_operations", From pypy.commits at gmail.com Thu Apr 14 11:32:08 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 14 Apr 2016 08:32:08 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Fall back to the buffer protocol in space.bytes_w() Message-ID: <570fb7f8.939d1c0a.2cf81.ffff8012@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83671:4b64950d0558 Date: 2016-04-14 16:30 +0100 http://bitbucket.org/pypy/pypy/changeset/4b64950d0558/ Log: Fall back to the buffer protocol in space.bytes_w() diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -231,7 +231,8 @@ raise BufferInterfaceNotFound def bytes_w(self, space): - self._typed_unwrap_error(space, "bytes") + buffer = space.buffer_w(self, space.BUF_FULL_RO) + return buffer.as_str() def unicode_w(self, space): self._typed_unwrap_error(space, "string") @@ -398,7 +399,7 @@ 
self.check_signal_action = None # changed by the signal module self.user_del_action = UserDelAction(self) self._code_of_sys_exc_info = None - + # can be overridden to a subclass self.initialize() @@ -1537,7 +1538,7 @@ """ if w_obj is unicode, call identifier_w() (i.e., return the UTF-8 encoded string). Else, call bytes_w(). - + Maybe we should kill str_w completely and manually substitute it with identifier_w/bytes_w at all call sites? """ From pypy.commits at gmail.com Thu Apr 14 11:33:58 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Apr 2016 08:33:58 -0700 (PDT) Subject: [pypy-commit] pypy default: phew, restore showing the assembler Message-ID: <570fb866.52ad1c0a.332f1.36cf@mx.google.com> Author: Armin Rigo Branch: Changeset: r83672:405af54d6c54 Date: 2016-04-14 17:31 +0200 http://bitbucket.org/pypy/pypy/changeset/405af54d6c54/ Log: phew, restore showing the assembler diff --git a/rpython/tool/jitlogparser/parser.py b/rpython/tool/jitlogparser/parser.py --- a/rpython/tool/jitlogparser/parser.py +++ b/rpython/tool/jitlogparser/parser.py @@ -93,15 +93,19 @@ if backend_dump is not None: raw_asm = self._asm_disassemble(backend_dump.decode('hex'), backend_tp, dump_start) + # additional mess: if the backend_dump starts with a series + # of zeros, raw_asm's first regular line is *after* that, + # after a line saying "...". So we assume that start==dump_start + # if this parameter was passed. 
asm = [] - start = 0 + start = dump_start for elem in raw_asm: if len(elem.split("\t")) < 3: continue e = elem.split("\t") adr = e[0] v = elem # --- more compactly: " ".join(e[2:]) - if not start: + if not start: # only if 'dump_start' is left at 0 start = int(adr.strip(":"), 16) ofs = int(adr.strip(":"), 16) - start if ofs >= 0: @@ -127,9 +131,9 @@ op.asm = '\n'.join([asm[i][1] for i in range(asm_index, end_index)]) return loop - def _asm_disassemble(self, d, origin_addr, tp): + def _asm_disassemble(self, d, tp, origin_addr): from rpython.jit.backend.tool.viewcode import machine_code_dump - return list(machine_code_dump(d, tp, origin_addr)) + return list(machine_code_dump(d, origin_addr, tp)) @classmethod def parse_from_input(cls, input, **kwds): @@ -419,10 +423,11 @@ world.parse(entry.splitlines(True)) dumps = {} for r in world.ranges: - if r.addr in addrs and addrs[r.addr]: - name = addrs[r.addr].pop(0) # they should come in order - data = r.data.encode('hex') # backward compatibility - dumps[name] = (world.backend_name, r.addr, data) + for pos1 in range(r.addr, r.addr + len(r.data)): + if pos1 in addrs and addrs[pos1]: + name = addrs[pos1].pop(0) # they should come in order + data = r.data.encode('hex') + dumps[name] = (world.backend_name, r.addr, data) loops = [] cat = extract_category(log, 'jit-log-opt') if not cat: From pypy.commits at gmail.com Thu Apr 14 20:24:55 2016 From: pypy.commits at gmail.com (pjenvey) Date: Thu, 14 Apr 2016 17:24:55 -0700 (PDT) Subject: [pypy-commit] pypy default: fix range check thinko; calculate w/ bits not bytes Message-ID: <571034d7.08851c0a.f6e6b.ffffdac2@mx.google.com> Author: Philip Jenvey Branch: Changeset: r83673:3d517af71821 Date: 2016-04-14 17:16 -0700 http://bitbucket.org/pypy/pypy/changeset/3d517af71821/ Log: fix range check thinko; calculate w/ bits not bytes diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ 
b/pypy/module/array/test/test_array.py @@ -99,12 +99,13 @@ for tc in 'BHIL': a = self.array(tc) - vals = [0, 2 ** a.itemsize - 1] + itembits = a.itemsize * 8 + vals = [0, 2 ** itembits - 1] a.fromlist(vals) assert a.tolist() == vals a = self.array(tc.lower()) - vals = [-1 * (2 ** a.itemsize) / 2, (2 ** a.itemsize) / 2 - 1] + vals = [-1 * (2 ** itembits) / 2, (2 ** itembits) / 2 - 1] a.fromlist(vals) assert a.tolist() == vals From pypy.commits at gmail.com Thu Apr 14 20:24:57 2016 From: pypy.commits at gmail.com (pjenvey) Date: Thu, 14 Apr 2016 17:24:57 -0700 (PDT) Subject: [pypy-commit] pypy py3k: fix range check thinko; calculate w/ bits not bytes Message-ID: <571034d9.52ad1c0a.332f1.ffffdcb5@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83674:8c9c137475bb Date: 2016-04-14 17:16 -0700 http://bitbucket.org/pypy/pypy/changeset/8c9c137475bb/ Log: fix range check thinko; calculate w/ bits not bytes (grafted from 3d517af7182117c996f1ddb710c5b34386465d30) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -79,12 +79,13 @@ for tc in 'BHIL': a = self.array(tc) - vals = [0, 2 ** a.itemsize - 1] + itembits = a.itemsize * 8 + vals = [0, 2 ** itembits - 1] a.fromlist(vals) assert a.tolist() == vals a = self.array(tc.lower()) - vals = [-1 * (2 ** a.itemsize) // 2, (2 ** a.itemsize) // 2 - 1] + vals = [-1 * (2 ** itembits) // 2, (2 ** itembits) // 2 - 1] a.fromlist(vals) assert a.tolist() == vals From pypy.commits at gmail.com Thu Apr 14 20:24:59 2016 From: pypy.commits at gmail.com (pjenvey) Date: Thu, 14 Apr 2016 17:24:59 -0700 (PDT) Subject: [pypy-commit] pypy py3k: kill this assert now that 3.3 supports Q (longlong) conversions, for 32bit Message-ID: <571034db.8d1b1c0a.cfaec.2d08@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83675:17e4f1a18996 Date: 2016-04-14 17:23 -0700 
http://bitbucket.org/pypy/pypy/changeset/17e4f1a18996/ Log: kill this assert now that 3.3 supports Q (longlong) conversions, for 32bit platforms diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -594,7 +594,6 @@ self.method = method if self.canoverflow: - assert self.bytes <= rffi.sizeof(rffi.ULONG) if self.bytes == rffi.sizeof(rffi.ULONG) and not signed and \ self.unwrap == 'int_w': # Treat this type as a ULONG diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -77,7 +77,7 @@ except OverflowError: pass - for tc in 'BHIL': + for tc in 'BHILQ': a = self.array(tc) itembits = a.itemsize * 8 vals = [0, 2 ** itembits - 1] From pypy.commits at gmail.com Thu Apr 14 20:31:51 2016 From: pypy.commits at gmail.com (pjenvey) Date: Thu, 14 Apr 2016 17:31:51 -0700 (PDT) Subject: [pypy-commit] pypy py3k: prefer oefmt for new code Message-ID: <57103677.2457c20a.61d0d.ffffcea6@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83676:136c64b6dd72 Date: 2016-04-14 17:30 -0700 http://bitbucket.org/pypy/pypy/changeset/136c64b6dd72/ Log: prefer oefmt for new code diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1,5 +1,5 @@ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from rpython.rlib import jit @@ -510,17 +510,17 @@ state = space.unpackiterable(w_state) num_args = len(state) if num_args < 1: - raise OperationError(space.w_TypeError, - 
space.wrap("function takes at least 1 argument " - "(" + str(num_args) + " given)")) + raise oefmt(space.w_TypeError, + "function takes at least 1 argument (%d given)", + num_args) elif num_args == 1: self.w_iterables = state[0] elif num_args == 2: self.w_iterables, self.w_it = state else: - raise OperationError(space.w_TypeError, - space.wrap("function takes at most 2 arguments " - "(" + str(num_args) + " given)")) + raise oefmt(space.w_TypeError, + "function takes at most 2 arguments (%d given)", + num_args) def W_Chain___new__(space, w_subtype, args_w): r = space.allocate_instance(W_Chain, w_subtype) @@ -978,9 +978,9 @@ state = space.unpackiterable(w_state) num_args = len(state) if num_args != 3: - raise OperationError(space.w_TypeError, - space.wrap("function takes exactly 3 arguments " - "(" + str(num_args) + " given)")) + raise oefmt(space.w_TypeError, + "function takes exactly 3 arguments (%d given)", + num_args) w_key, w_lookahead, _ = state self.w_key = w_key self.w_lookahead = w_lookahead @@ -1419,8 +1419,7 @@ def descr_setstate(self, space, w_state): indices_w = space.fixedview(w_state) if len(indices_w) != self.r: - raise OperationError(space.w_ValueError, space.wrap( - "invalid arguments")) + raise oefmt(space.w_ValueError, "invalid arguments") for i in range(self.r): index = space.int_w(indices_w[i]) max = self.get_maximum(i) @@ -1539,12 +1538,10 @@ cycles_w = space.unpackiterable(w_cycles) self.started = space.bool_w(w_started) else: - raise OperationError(space.w_ValueError, space.wrap( - "invalid arguments")) + raise oefmt(space.w_ValueError, "invalid arguments") if len(indices_w) != len(self.pool_w) or len(cycles_w) != self.r: - raise OperationError(space.w_ValueError, space.wrap( - "inavalid arguments")) + raise oefmt(space.w_ValueError, "inavalid arguments") n = len(self.pool_w) for i in range(n): From pypy.commits at gmail.com Thu Apr 14 23:37:46 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 14 Apr 2016 20:37:46 -0700 (PDT) Subject: 
[pypy-commit] pypy follow_symlinks: match CPython error messages better Message-ID: <5710620a.c31f1c0a.9aa65.fffff2d4@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83677:c353cff42cfd Date: 2016-04-15 04:36 +0100 http://bitbucket.org/pypy/pypy/changeset/c353cff42cfd/ Log: match CPython error messages better diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -134,6 +134,9 @@ class _PathOrFd(Unwrapper): def unwrap(self, space, w_value): + if space.is_none(w_value): + raise oefmt(space.w_TypeError, + "can't specify None for path argument") if _WIN32: try: path_u = space.unicode_w(w_value) @@ -145,10 +148,7 @@ return Path(-1, path_b, None, w_value) except OperationError: pass - if not space.isinstance_w(w_value, space.w_int): - raise oefmt(space.w_TypeError, - "argument should be string, bytes or integer, not %T", w_value) - fd = unwrap_fd(space, w_value) + fd = unwrap_fd(space, w_value, "string, bytes or integer") return Path(fd, None, None, w_value) class _JustPath(Unwrapper): @@ -175,8 +175,16 @@ DEFAULT_DIR_FD = -100 DIR_FD_AVAILABLE = False -def unwrap_fd(space, w_value): - return space.c_int_w(w_value) + at specialize.arg(2) +def unwrap_fd(space, w_value, allowed_types='integer'): + try: + return space.c_int_w(w_value) + except OperationError as e: + if not e.match(space, space.w_OverflowError): + raise oefmt(space.w_TypeError, + "argument should be %s, not %T", allowed_types, w_value) + else: + raise def _unwrap_dirfd(space, w_value): if space.is_none(w_value): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -174,6 +174,10 @@ import stat st = self.posix.stat(".") assert stat.S_ISDIR(st.st_mode) + st = self.posix.stat(b".") + assert stat.S_ISDIR(st.st_mode) + st = self.posix.stat(bytearray(b".")) 
+ assert stat.S_ISDIR(st.st_mode) st = self.posix.lstat(".") assert stat.S_ISDIR(st.st_mode) @@ -185,6 +189,11 @@ assert exc.value.errno == errno.ENOENT assert exc.value.filename == "nonexistentdir/nonexistentfile" + excinfo = raises(TypeError, self.posix.stat, None) + assert "can't specify None" in str(excinfo.value) + excinfo = raises(TypeError, self.posix.stat, 2.) + assert "should be string, bytes or integer, not float" in str(excinfo.value) + if hasattr(__import__(os.name), "statvfs"): def test_statvfs(self): st = self.posix.statvfs(".") From pypy.commits at gmail.com Fri Apr 15 03:05:19 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 15 Apr 2016 00:05:19 -0700 (PDT) Subject: [pypy-commit] pypy default: translation issue resolved Message-ID: <571092af.aa0ac20a.5e95e.3ac7@mx.google.com> Author: Richard Plangger Branch: Changeset: r83678:13f292553b0d Date: 2016-04-15 09:00 +0200 http://bitbucket.org/pypy/pypy/changeset/13f292553b0d/ Log: translation issue resolved diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -336,12 +336,15 @@ def _relocate_forbidden_variable(self, reg, var, reverse_mapping, forbidden_vars, forbidden_reg): for candidate in r.MANAGED_REGS: + # move register of var to another register + # thus it is not allowed to bei either reg or forbidden_reg if candidate is reg or candidate is forbidden_reg: continue - if candidate not in forbidden_vars: - var = reverse_mapping.get(candidate, None) - if var is not None: - self._sync_var(var) + # neither can we allow to move it to a register of another forbidden variable + candidate_var = reverse_mapping.get(candidate, None) + if not candidate_var or candidate_var not in forbidden_vars: + if candidate_var is not None: + self._sync_var(candidate_var) self.assembler.regalloc_mov(reg, candidate) self.reg_bindings[var] = candidate reverse_mapping[reg] = var From 
pypy.commits at gmail.com Fri Apr 15 03:30:05 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Apr 2016 00:30:05 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: remove debug cruft Message-ID: <5710987d.47afc20a.10eb2.3c42@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83679:f696f49a3534 Date: 2016-04-14 01:07 +0300 http://bitbucket.org/pypy/pypy/changeset/f696f49a3534/ Log: remove debug cruft diff --git a/pypy/module/cpyext/test/foo3.c b/pypy/module/cpyext/test/foo3.c --- a/pypy/module/cpyext/test/foo3.c +++ b/pypy/module/cpyext/test/foo3.c @@ -4,9 +4,9 @@ PyObject* foo3type_tp_new(PyTypeObject* metatype, PyObject* args, PyObject* kwds) { PyObject* newType; - printf("in foo3type_tp_new, preprocessing...\n"); + /*printf("in foo3type_tp_new, preprocessing...\n"); */ newType = PyType_Type.tp_new(metatype, args, kwds); - printf("in foo3type_tp_new, postprocessing...\n"); + /*printf("in foo3type_tp_new, postprocessing...\n"); */ return newType; } diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -882,7 +882,7 @@ def test_tp_new_in_subclass_of_type(self): module = self.import_module(name='foo3') - print('calling module.footype()...') + #print('calling module.footype()...') module.footype("X", (object,), {}) def test_app_subclass_of_c_type(self): From pypy.commits at gmail.com Fri Apr 15 03:30:07 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Apr 2016 00:30:07 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: make fooc3 more like numpy's PyCDoubleArrType_Type Message-ID: <5710987f.c1621c0a.2d020.ffff8ce6@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83680:dabbf2aa9497 Date: 2016-04-15 10:22 +0300 http://bitbucket.org/pypy/pypy/changeset/dabbf2aa9497/ Log: make fooc3 more like numpy's PyCDoubleArrType_Type diff --git a/pypy/module/cpyext/test/foo3.c 
b/pypy/module/cpyext/test/foo3.c --- a/pypy/module/cpyext/test/foo3.c +++ b/pypy/module/cpyext/test/foo3.c @@ -10,6 +10,8 @@ return newType; } +#define BASEFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_CHECKTYPES + PyTypeObject footype = { PyVarObject_HEAD_INIT(NULL, 0) /*tp_name*/ "foo3.footype", @@ -30,7 +32,7 @@ /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ 0, - /*tp_flags*/ Py_TPFLAGS_DEFAULT, + /*tp_flags*/ BASEFLAGS, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, From pypy.commits at gmail.com Fri Apr 15 03:30:09 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Apr 2016 00:30:09 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: cleanup Message-ID: <57109881.03dd1c0a.3dcc0.7f52@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83681:a50ca22b1068 Date: 2016-04-15 10:22 +0300 http://bitbucket.org/pypy/pypy/changeset/a50ca22b1068/ Log: cleanup diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -10,7 +10,7 @@ from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.objectobject import W_ObjectObject -from rpython.rlib.objectmodel import specialize, we_are_translated +from rpython.rlib.objectmodel import specialize from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.annlowlevel import llhelper from rpython.rlib import rawrefcount From pypy.commits at gmail.com Fri Apr 15 03:30:10 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Apr 2016 00:30:10 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: avoid segfault in -A tests, still lookiong for why this happens Message-ID: <57109882.91d31c0a.ed1a0.7f0f@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83682:6e02ba5b13d9 Date: 2016-04-15 10:24 +0300 http://bitbucket.org/pypy/pypy/changeset/6e02ba5b13d9/ Log: avoid segfault in -A tests, still lookiong for why 
this happens diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -630,9 +630,14 @@ if (call_init and not (space.is_w(self, space.w_type) and not __args__.keywords and len(__args__.arguments_w) == 1)): w_descr = space.lookup(w_newobject, '__init__') - w_result = space.get_and_call_args(w_descr, w_newobject, __args__) - if not space.is_w(w_result, space.w_None): - raise oefmt(space.w_TypeError, "__init__() should return None") + if w_descr: + w_result = space.get_and_call_args(w_descr, w_newobject, __args__) + if not space.is_w(w_result, space.w_None): + raise oefmt(space.w_TypeError, "__init__() should return None") + else: + # XXX FIXME - happens when running test_tp_new_in_subclass_of_type + # but only with pypy pytest.py -A ... + print "'__init__' not found when calling %s" % self.getname(space) return w_newobject def descr_repr(self, space): From pypy.commits at gmail.com Fri Apr 15 03:30:12 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Apr 2016 00:30:12 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: remove printing Message-ID: <57109884.6614c20a.ebfb5.31c5@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83683:f85aaa2e601f Date: 2016-04-15 10:25 +0300 http://bitbucket.org/pypy/pypy/changeset/f85aaa2e601f/ Log: remove printing diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -280,11 +280,6 @@ try: subtype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_subtype)) - if subtype == self_pytype: - print 'recursion detected???' 
- print 'calling tp_new of %s with %s' % ( - rffi.charp2str(self_pytype.c_tp_name), - rffi.charp2str(subtype.c_tp_name)) w_obj = generic_cpy_call(space, tp_new, subtype, w_args, w_kwds) finally: Py_DecRef(space, w_subtype) From pypy.commits at gmail.com Fri Apr 15 03:30:14 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Apr 2016 00:30:14 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: improve debug output when WARN_ABOUT_MISSING_SLOT_FUNCTIONS is True Message-ID: <57109886.e109c20a.35097.33c5@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83684:871731f97a39 Date: 2016-04-15 10:27 +0300 http://bitbucket.org/pypy/pypy/changeset/871731f97a39/ Log: improve debug output when WARN_ABOUT_MISSING_SLOT_FUNCTIONS is True diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -216,7 +216,8 @@ if slot_func_helper is None: if WARN_ABOUT_MISSING_SLOT_FUNCTIONS: - os.write(2, method_name + " defined by the type but no slot function defined!\n") + os.write(2, "%s defined by %s but no slot function defined!\n" % ( + method_name, w_type.getname(space))) continue # XXX special case wrapper-functions and use a "specific" slot func From pypy.commits at gmail.com Fri Apr 15 10:13:36 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Apr 2016 07:13:36 -0700 (PDT) Subject: [pypy-commit] pypy default: Two tests and two related fixes in typeobject (segfaults the translated Message-ID: <5710f710.0976c20a.c0c21.ffffae4c@mx.google.com> Author: Armin Rigo Branch: Changeset: r83685:5319e039544e Date: 2016-04-15 16:13 +0200 http://bitbucket.org/pypy/pypy/changeset/5319e039544e/ Log: Two tests and two related fixes in typeobject (segfaults the translated pypy) diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ 
-1073,6 +1073,24 @@ class D(B, A): # "best base" is A __slots__ = ("__weakref__",) + def test_crash_mro_without_object_1(self): + class X(type): + def mro(self): + return [self] + class C: + __metaclass__ = X + e = raises(TypeError, C) # the lookup of '__new__' fails + assert str(e.value) == "cannot create 'C' instances" + + def test_crash_mro_without_object_2(self): + class X(type): + def mro(self): + return [self, int] + class C(int): + __metaclass__ = X + C() # the lookup of '__new__' succeeds in 'int', + # but the lookup of '__init__' fails + class AppTestWithMethodCacheCounter: spaceconfig = {"objspace.std.withmethodcachecounter": True} diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -618,6 +618,9 @@ w_newfunc = None if w_newfunc is None: w_newtype, w_newdescr = self.lookup_where('__new__') + if w_newdescr is None: # see test_crash_mro_without_object_1 + raise oefmt(space.w_TypeError, "cannot create '%N' instances", + self) w_newfunc = space.get(w_newdescr, self) if (space.config.objspace.std.newshortcut and not we_are_jitted() and @@ -630,9 +633,12 @@ if (call_init and not (space.is_w(self, space.w_type) and not __args__.keywords and len(__args__.arguments_w) == 1)): w_descr = space.lookup(w_newobject, '__init__') - w_result = space.get_and_call_args(w_descr, w_newobject, __args__) - if not space.is_w(w_result, space.w_None): - raise oefmt(space.w_TypeError, "__init__() should return None") + if w_descr is not None: # see test_crash_mro_without_object_2 + w_result = space.get_and_call_args(w_descr, w_newobject, + __args__) + if not space.is_w(w_result, space.w_None): + raise oefmt(space.w_TypeError, + "__init__() should return None") return w_newobject def descr_repr(self, space): From pypy.commits at gmail.com Fri Apr 15 11:26:10 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 15 Apr 2016 08:26:10 -0700 (PDT) Subject: [pypy-commit] pypy 
follow_symlinks: Allow fd in os.pathconf() Message-ID: <57110812.43ecc20a.80f19.ffffe141@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83686:33cd62260a0c Date: 2016-04-15 16:25 +0100 http://bitbucket.org/pypy/pypy/changeset/33cd62260a0c/ Log: Allow fd in os.pathconf() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1853,13 +1853,19 @@ raise wrap_oserror(space, e) return space.wrap(res) - at unwrap_spec(path='str0') + at unwrap_spec(path=path_or_fd(allow_fd=hasattr(os, 'fpathconf'))) def pathconf(space, path, w_name): num = confname_w(space, w_name, os.pathconf_names) - try: - res = os.pathconf(path, num) - except OSError, e: - raise wrap_oserror(space, e) + if path.as_fd != -1: + try: + res = os.fpathconf(path.as_fd, num) + except OSError, e: + raise wrap_oserror(space, e) + else: + try: + res = os.pathconf(path.as_bytes, num) + except OSError, e: + raise wrap_oserror2(space, e, path.w_path) return space.wrap(res) def confstr(space, w_name): From pypy.commits at gmail.com Fri Apr 15 11:42:49 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 15 Apr 2016 08:42:49 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Corner case: don't crash if path == -1 Message-ID: <57110bf9.463f1c0a.45de1.561c@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83687:a60a494657a8 Date: 2016-04-15 16:42 +0100 http://bitbucket.org/pypy/pypy/changeset/a60a494657a8/ Log: Corner case: don't crash if path == -1 diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -178,13 +178,17 @@ @specialize.arg(2) def unwrap_fd(space, w_value, allowed_types='integer'): try: - return space.c_int_w(w_value) + result = space.c_int_w(w_value) except OperationError as e: if not e.match(space, space.w_OverflowError): raise 
oefmt(space.w_TypeError, "argument should be %s, not %T", allowed_types, w_value) else: raise + if result == -1: + # -1 is used as sentinel value for not a fd + raise oefmt(space.w_ValueError, "invalid file descriptor: -1") + return result def _unwrap_dirfd(space, w_value): if space.is_none(w_value): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -193,6 +193,7 @@ assert "can't specify None" in str(excinfo.value) excinfo = raises(TypeError, self.posix.stat, 2.) assert "should be string, bytes or integer, not float" in str(excinfo.value) + raises(ValueError, self.posix.stat, -1) if hasattr(__import__(os.name), "statvfs"): def test_statvfs(self): From pypy.commits at gmail.com Fri Apr 15 11:55:07 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Apr 2016 08:55:07 -0700 (PDT) Subject: [pypy-commit] cffi default: Test and fix for converting empty ffi's in embedding mode Message-ID: <57110edb.52ad1c0a.332f1.172c@mx.google.com> Author: Armin Rigo Branch: Changeset: r2657:28bd9c90bce3 Date: 2016-04-15 17:55 +0200 http://bitbucket.org/cffi/cffi/changeset/28bd9c90bce3/ Log: Test and fix for converting empty ffi's in embedding mode diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -1231,7 +1231,7 @@ if c == '\n': return '\\n' return '\\%03o' % ord(c) lines = [] - for line in s.splitlines(True): + for line in s.splitlines(True) or ['']: lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) return ' \\\n'.join(lines) diff --git a/testing/embedding/empty.py b/testing/embedding/empty.py new file mode 100644 --- /dev/null +++ b/testing/embedding/empty.py @@ -0,0 +1,10 @@ +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api("") + +ffi.set_source("_empty_cffi", "") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/testing/embedding/test_basic.py 
b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -164,6 +164,9 @@ class TestBasic(EmbeddingTests): + def test_empty(self): + empty_cffi = self.prepare_module('empty') + def test_basic(self): add1_cffi = self.prepare_module('add1') self.compile('add1-test', [add1_cffi]) From pypy.commits at gmail.com Fri Apr 15 12:11:33 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 15 Apr 2016 09:11:33 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: Close branch rposix-for-3 Message-ID: <571112b5.d3981c0a.a06f4.ffffdb1e@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83688:9f8e819317a9 Date: 2016-04-15 17:11 +0100 http://bitbucket.org/pypy/pypy/changeset/9f8e819317a9/ Log: Close branch rposix-for-3 From pypy.commits at gmail.com Fri Apr 15 12:11:47 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 15 Apr 2016 09:11:47 -0700 (PDT) Subject: [pypy-commit] pypy default: Merged in rposix-for-3 (pull request #427) Message-ID: <571112c3.ca941c0a.47ac7.ffffb9c5@mx.google.com> Author: Ronan Lamy Branch: Changeset: r83689:379d221b82b9 Date: 2016-04-15 17:11 +0100 http://bitbucket.org/pypy/pypy/changeset/379d221b82b9/ Log: Merged in rposix-for-3 (pull request #427) Rposix for 3 diff --git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py --- a/rpython/rlib/rposix_stat.py +++ b/rpython/rlib/rposix_stat.py @@ -23,6 +23,7 @@ from rpython.rlib.rarithmetic import intmask from rpython.rlib.rposix import ( replace_os_function, handle_posix_error, _as_bytes0) +from rpython.rlib import rposix _WIN32 = sys.platform.startswith('win') _LINUX = sys.platform.startswith('linux') @@ -36,13 +37,12 @@ # sub-second timestamps. # - TIMESPEC is defined when the "struct stat" contains st_atim field. 
-if sys.platform.startswith('linux') or sys.platform.startswith('openbsd'): - TIMESPEC = platform.Struct('struct timespec', - [('tv_sec', rffi.TIME_T), - ('tv_nsec', rffi.LONG)]) -else: +try: + from rpython.rlib.rposix import TIMESPEC +except ImportError: TIMESPEC = None + # all possible fields - some of them are not available on all platforms ALL_STAT_FIELDS = [ ("st_mode", lltype.Signed), @@ -300,13 +300,6 @@ includes=INCLUDES ) -if TIMESPEC is not None: - class CConfig_for_timespec: - _compilation_info_ = compilation_info - TIMESPEC = TIMESPEC - TIMESPEC = lltype.Ptr( - platform.configure(CConfig_for_timespec)['TIMESPEC']) - def posix_declaration(try_to_add=None): global STAT_STRUCT, STATVFS_STRUCT @@ -322,7 +315,7 @@ if _name == originalname: # replace the 'st_atime' field of type rffi.DOUBLE # with a field 'st_atim' of type 'struct timespec' - lst[i] = (timespecname, TIMESPEC.TO) + lst[i] = (timespecname, TIMESPEC) break _expand(LL_STAT_FIELDS, 'st_atime', 'st_atim') @@ -512,6 +505,23 @@ path = traits.as_str0(path) return win32_xstat(traits, path, traverse=False) +if rposix.HAVE_FSTATAT: + from rpython.rlib.rposix import AT_FDCWD, AT_SYMLINK_NOFOLLOW + c_fstatat = rffi.llexternal('fstatat', + [rffi.INT, rffi.CCHARP, STAT_STRUCT, rffi.INT], rffi.INT, + compilation_info=compilation_info, + save_err=rffi.RFFI_SAVE_ERRNO, macro=True) + + def fstatat(pathname, dir_fd=AT_FDCWD, follow_symlinks=True): + if follow_symlinks: + flags = 0 + else: + flags = AT_SYMLINK_NOFOLLOW + with lltype.scoped_alloc(STAT_STRUCT.TO) as stresult: + error = c_fstatat(dir_fd, pathname, stresult, flags) + handle_posix_error('fstatat', error) + return build_stat_result(stresult) + @replace_os_function('fstatvfs') def fstatvfs(fd): with lltype.scoped_alloc(STATVFS_STRUCT.TO) as stresult: diff --git a/rpython/rlib/test/test_rposix_stat.py b/rpython/rlib/test/test_rposix_stat.py --- a/rpython/rlib/test/test_rposix_stat.py +++ b/rpython/rlib/test/test_rposix_stat.py @@ -56,3 +56,13 @@ except 
OSError, e: py.test.skip("the underlying os.fstatvfs() failed: %s" % e) rposix_stat.fstatvfs(0) + + at py.test.mark.skipif("not hasattr(rposix_stat, 'fstatat')") +def test_fstatat(tmpdir): + tmpdir.join('file').write('text') + dirfd = os.open(str(tmpdir), os.O_RDONLY) + try: + result = rposix_stat.fstatat('file', dir_fd=dirfd, follow_symlinks=False) + finally: + os.close(dirfd) + assert result.st_atime == tmpdir.join('file').atime() From pypy.commits at gmail.com Fri Apr 15 12:23:05 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Apr 2016 09:23:05 -0700 (PDT) Subject: [pypy-commit] cffi default: ffi.rawstring(), with a minimal interface Message-ID: <57111569.08851c0a.f6e6b.1d7e@mx.google.com> Author: Armin Rigo Branch: Changeset: r2658:21bef1a21d1b Date: 2016-04-15 18:23 +0200 http://bitbucket.org/cffi/cffi/changeset/21bef1a21d1b/ Log: ffi.rawstring(), with a minimal interface diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5582,6 +5582,36 @@ return NULL; } +static PyObject *b_rawstring(PyObject *self, PyObject *arg) +{ + CDataObject *cd; + Py_ssize_t length; + + if (!CData_Check(arg)) { + PyErr_SetString(PyExc_TypeError, "expected a 'cdata' object"); + return NULL; + } + cd = (CDataObject *)arg; + if (!(cd->c_type->ct_flags & CT_ARRAY) || + !(cd->c_type->ct_itemdescr->ct_flags & CT_PRIMITIVE_CHAR)) { + PyErr_Format(PyExc_TypeError, + "expected an array of 'char' or 'wchar_t', got '%s'", + cd->c_type->ct_name); + return NULL; + } + + length = get_array_length(cd); + if (cd->c_type->ct_itemdescr->ct_size == sizeof(char)) + return PyBytes_FromStringAndSize(cd->c_data, length); +#ifdef HAVE_WCHAR_H + else if (cd->c_type->ct_itemdescr->ct_size == sizeof(wchar_t)) + return _my_PyUnicode_FromWideChar((wchar_t *)cd->c_data, length); +#endif + + PyErr_SetString(PyExc_SystemError, "bad size for char-like"); + return NULL; +} + static PyObject *b_buffer(PyObject *self, PyObject *args, PyObject *kwds) { 
CDataObject *cd; @@ -6225,6 +6255,7 @@ {"rawaddressof", b_rawaddressof, METH_VARARGS}, {"getcname", b_getcname, METH_VARARGS}, {"string", (PyCFunction)b_string, METH_VARARGS | METH_KEYWORDS}, + {"rawstring", b_rawstring, METH_O}, {"buffer", (PyCFunction)b_buffer, METH_VARARGS | METH_KEYWORDS}, {"get_errno", b_get_errno, METH_NOARGS}, {"set_errno", b_set_errno, METH_O}, diff --git a/c/ffi_obj.c b/c/ffi_obj.c --- a/c/ffi_obj.c +++ b/c/ffi_obj.c @@ -459,6 +459,19 @@ #define ffi_string b_string /* ffi_string() => b_string() from _cffi_backend.c */ +PyDoc_STRVAR(ffi_rawstring_doc, +"Convert a cdata that is an array of 'char' or 'wchar_t' to\n" +"a byte or unicode string. Unlike ffi.string(), it does not stop\n" +"at the first null.\n" +"\n" +"Note that if you have a pointer and an explicit length, you\n" +"can use 'p[0:length]' to make an array view. This is similar to\n" +"the construct 'list(p[0:length])', which returns a list of chars/\n" +"unichars/ints/floats."); + +#define ffi_rawstring b_rawstring /* ffi_rawstring() => b_rawstring() + from _cffi_backend.c */ + PyDoc_STRVAR(ffi_buffer_doc, "Return a read-write buffer object that references the raw C data\n" "pointed to by the given 'cdata'. 
The 'cdata' must be a pointer or an\n" @@ -1090,6 +1103,7 @@ {"new_allocator",(PyCFunction)ffi_new_allocator,METH_VKW,ffi_new_allocator_doc}, {"new_handle", (PyCFunction)ffi_new_handle, METH_O, ffi_new_handle_doc}, {"offsetof", (PyCFunction)ffi_offsetof, METH_VARARGS, ffi_offsetof_doc}, + {"rawstring", (PyCFunction)ffi_rawstring, METH_O, ffi_rawstring_doc}, {"sizeof", (PyCFunction)ffi_sizeof, METH_O, ffi_sizeof_doc}, {"string", (PyCFunction)ffi_string, METH_VKW, ffi_string_doc}, {"typeof", (PyCFunction)ffi_typeof, METH_O, ffi_typeof_doc}, diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -299,6 +299,18 @@ """ return self._backend.string(cdata, maxlen) + def rawstring(self, cdata): + """Convert a cdata that is an array of 'char' or 'wchar_t' to + a byte or unicode string. Unlike ffi.string(), it does not stop + at the first null. + + Note that if you have a pointer and an explicit length, you + can use 'p[0:length]' to make an array view. This is similar to + the construct 'list(p[0:length])', which returns a list of chars/ + unichars/ints/floats. + """ + return self._backend.rawstring(cdata) + def buffer(self, cdata, size=-1): """Return a read-write buffer object that references the raw C data pointed to by the given 'cdata'. 
The 'cdata' must be a pointer or diff --git a/testing/cffi0/test_ffi_backend.py b/testing/cffi0/test_ffi_backend.py --- a/testing/cffi0/test_ffi_backend.py +++ b/testing/cffi0/test_ffi_backend.py @@ -472,3 +472,12 @@ assert ffi.list_types() == (['b', 'bb', 'bbb'], ['a', 'cc', 'ccc'], ['aa', 'aaa', 'g']) + + def test_rawstring(self): + ffi = FFI() + p = ffi.new("char[]", "abc\x00def") + assert ffi.rawstring(p) == "abc\x00def\x00" + assert ffi.rawstring(p[1:6]) == "bc\x00de" + p = ffi.new("wchar_t[]", u"abc\x00def") + assert ffi.rawstring(p) == u"abc\x00def\x00" + assert ffi.rawstring(p[1:6]) == u"bc\x00de" diff --git a/testing/cffi1/test_ffi_obj.py b/testing/cffi1/test_ffi_obj.py --- a/testing/cffi1/test_ffi_obj.py +++ b/testing/cffi1/test_ffi_obj.py @@ -495,3 +495,15 @@ assert i < 20 time.sleep(0.51) assert seen == ['init!', 'oops'] * 3 + +def test_rawstring(): + ffi = _cffi1_backend.FFI() + p = ffi.new("char[]", "abc\x00def") + assert ffi.rawstring(p) == "abc\x00def\x00" + assert ffi.rawstring(p[1:6]) == "bc\x00de" + p = ffi.new("wchar_t[]", u"abc\x00def") + assert ffi.rawstring(p) == u"abc\x00def\x00" + assert ffi.rawstring(p[1:6]) == u"bc\x00de" + # + py.test.raises(TypeError, ffi.rawstring, "foobar") + py.test.raises(TypeError, ffi.rawstring, p + 1) From pypy.commits at gmail.com Fri Apr 15 12:27:38 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Apr 2016 09:27:38 -0700 (PDT) Subject: [pypy-commit] cffi default: Add a test here Message-ID: <5711167a.654fc20a.4179b.147f@mx.google.com> Author: Armin Rigo Branch: Changeset: r2659:d13f78c88231 Date: 2016-04-15 18:28 +0200 http://bitbucket.org/cffi/cffi/changeset/d13f78c88231/ Log: Add a test here diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3525,3 +3525,18 @@ d = {} _get_common_types(d) assert d['bool'] == '_Bool' + +def test_rawstring(): + BChar = new_primitive_type("char") + BArray = new_array_type(new_pointer_type(BChar), 10) # char[10] + p = newp(BArray, 
"abc\x00def") + assert rawstring(p) == "abc\x00def\x00\x00\x00" + assert rawstring(p[1:6]) == "bc\x00de" + BWChar = new_primitive_type("wchar_t") + BArray = new_array_type(new_pointer_type(BWChar), 10) # wchar_t[10] + p = newp(BArray, u"abc\x00def") + assert rawstring(p) == u"abc\x00def\x00\x00\x00" + assert rawstring(p[1:6]) == u"bc\x00de" + # + py.test.raises(TypeError, rawstring, "foobar") + py.test.raises(TypeError, rawstring, p + 1) From pypy.commits at gmail.com Fri Apr 15 12:49:09 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 15 Apr 2016 09:49:09 -0700 (PDT) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <57111b85.12871c0a.bfc1e.6fe2@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83690:1a9530342a5f Date: 2016-04-15 17:47 +0100 http://bitbucket.org/pypy/pypy/changeset/1a9530342a5f/ Log: hg merge default diff too long, truncating to 2000 out of 6434 lines diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.1.0.rst release-5.0.1.rst release-5.0.0.rst release-4.0.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-5.1.0.rst whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.1.0.rst @@ -0,0 +1,136 @@ +======== +PyPy 5.1 +======== + +We have released PyPy 5.1, about a month after PyPy 5.0. +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata, and we now fully support the IBM s390x +architecture. 
+ +You can download the PyPy 5.1 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s960x** running Linux + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 5.0 released in March 2015) +========================================================= + +* New features: + + * A new jit backend for the IBM s390x, which was a large effort over the past + few months. 
+ + * Add better support for PyUnicodeObject in the C-API compatibility layer + + * Support GNU/kFreeBSD Debian ports in vmprof + + * Add __pypy__._promote + + * Make attrgetter a single type for CPython compatibility + +* Bug Fixes + + * Catch exceptions raised in an exit function + + * Fix a corner case in the JIT + + * Fix edge cases in the cpyext refcounting-compatible semantics + + * Try harder to not emit NEON instructions on ARM processors without NEON + support + + * Support glibc < 2.16 on ARM + + * Improve the rpython posix module system interaction function calls + + * Detect a missing class function implementation instead of calling a random + function + + * Check that PyTupleObjects do not contain any NULLs at the + point of conversion to W_TupleObjects + + * In ctypes, fix _anonymous_ fields of instances + + * Fix JIT issue with unpack() on a Trace which contains half-written operations + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * Implemented numpy.where for a single argument + + * Indexing by a numpy scalar now returns a scalar + + * Fix transpose(arg) when arg is a sequence + + * Refactor include file handling, now all numpy ndarray, ufunc, and umath + functions exported from libpypy.so are declared in pypy_numpy.h, which is + included only when building our fork of numpy + +* Performance improvements: + + * Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting + + * Merge another round of improvements to the warmup performance + + * Cleanup history rewriting in pyjitpl + + * Remove the forced minor collection that occurs when rewriting the + assembler at the start of the JIT backend + +* Internal refactorings: + + * Use a simpler logger to speed up translation + + * Drop vestiges of Python 2.5 support in testing + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. 
_`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -0,0 +1,62 @@ +========================= +What's new in PyPy 5.1 +========================= + +.. this is a revision shortly after release-5.0 +.. startrev: b238b48f9138 + +.. branch: s390x-backend + +The jit compiler backend implementation for the s390x architecutre. +The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. +It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. + +.. branch: remove-py-log + +Replace py.log with something simpler, which should speed up logging + +.. branch: where_1_arg + +Implemented numpy.where for 1 argument (thanks sergem) + +.. branch: fix_indexing_by_numpy_int + +Implement yet another strange numpy indexing compatibility; indexing by a scalar +returns a scalar + +.. branch: fix_transpose_for_list_v3 + +Allow arguments to transpose to be sequences + +.. branch: jit-leaner-frontend + +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation +of traces + +.. branch: win32-lib-name + +.. branch: remove-frame-forcing-in-executioncontext + +.. branch: rposix-for-3 + +Wrap more POSIX functions in `rpython.rlib.rposix`. + +.. branch: cleanup-history-rewriting + +A local clean-up in the JIT front-end. + +.. branch: jit-constptr-2 + +Remove the forced minor collection that occurs when rewriting the +assembler at the start of the JIT backend. This is done by emitting +the ConstPtrs in a separate table, and loading from the table. It +gives improved warm-up time and memory usage, and also removes +annoying special-purpose code for pinned pointers. + +.. branch: fix-jitlog + +.. 
branch: cleanup-includes + +Remove old uneeded numpy headers, what is left is only for testing. Also +generate pypy_numpy.h which exposes functions to directly use micronumpy +ndarray and ufuncs diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,42 +1,7 @@ ========================= -What's new in PyPy 5.0.+ +What's new in PyPy 5.1+ ========================= -.. this is a revision shortly after release-5.0 -.. startrev: b238b48f9138 +.. this is a revision shortly after release-5.1 +.. startrev: 2180e1eaf6f6 -.. branch: s390x-backend - -The jit compiler backend implementation for the s390x architecutre. -The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. -It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. - -.. branch: remove-py-log - -Replace py.log with something simpler, which should speed up logging - -.. branch: where_1_arg - -Implemented numpy.where for 1 argument (thanks sergem) - -.. branch: fix_indexing_by_numpy_int - -Implement yet another strange numpy indexing compatibility; indexing by a scalar -returns a scalar - -.. branch: fix_transpose_for_list_v3 - -Allow arguments to transpose to be sequences - -.. branch: jit-leaner-frontend - -Improve the tracing speed in the frontend as well as heapcache by using a more compact representation -of traces - -.. branch: win32-lib-name - -.. branch: remove-frame-forcing-in-executioncontext - -.. branch: rposix-for-3 - -Wrap more POSIX functions in `rpython.rlib.rposix`. 
diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -5,9 +5,9 @@ # ____________________________________________________________ -def sorted(lst, key=None, reverse=False): +def sorted(iterable, key=None, reverse=False): "sorted(iterable, key=None, reverse=False) --> new sorted list" - sorted_lst = list(lst) + sorted_lst = list(iterable) sorted_lst.sort(key=key, reverse=reverse) return sorted_lst diff --git a/pypy/module/_multiprocessing/test/test_win32.py b/pypy/module/_multiprocessing/test/test_win32.py --- a/pypy/module/_multiprocessing/test/test_win32.py +++ b/pypy/module/_multiprocessing/test/test_win32.py @@ -2,7 +2,8 @@ import sys class AppTestWin32: - spaceconfig = dict(usemodules=('_multiprocessing',)) + spaceconfig = dict(usemodules=('_multiprocessing', + 'signal', '_rawffi', 'binascii')) def setup_class(cls): if sys.platform != "win32": diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -758,6 +758,7 @@ try: while 1: count += cli.send(b'foobar' * 70) + assert count < 100000 except timeout: pass t.recv(count) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -144,26 +144,14 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir, copy_numpy_headers): +def copy_header_files(dstdir): # XXX: 20 lines of code to recursively copy a directory, really?? 
assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') - for name in ("pypy_decl.h", "pypy_macros.h", "pypy_structmember_decl.h"): + for name in ["pypy_macros.h"] + FUNCTIONS_BY_HEADER.keys(): headers.append(udir.join(name)) _copy_header_files(headers, dstdir) - if copy_numpy_headers: - try: - dstdir.mkdir('numpy') - except py.error.EEXIST: - pass - numpy_dstdir = dstdir / 'numpy' - - numpy_include_dir = include_dir / 'numpy' - numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') - _copy_header_files(numpy_headers, numpy_dstdir) - - class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -234,7 +222,8 @@ wrapper.c_name = cpyext_namespace.uniquename(self.c_name) return wrapper -def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header='pypy_decl.h', +DEFAULT_HEADER = 'pypy_decl.h' +def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER, gil=None, result_borrowed=False): """ Declares a function to be exported. @@ -268,6 +257,8 @@ func_name = func.func_name if header is not None: c_name = None + assert func_name not in FUNCTIONS, ( + "%s already registered" % func_name) else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, @@ -275,10 +266,6 @@ result_borrowed=result_borrowed) func.api_func = api_function - if header is not None: - assert func_name not in FUNCTIONS, ( - "%s already registered" % func_name) - if error is _NOT_SPECIFIED: raise ValueError("function %s has no return value for exceptions" % func) @@ -366,7 +353,8 @@ unwrapper_catch = make_unwrapper(True) unwrapper_raise = make_unwrapper(False) if header is not None: - FUNCTIONS[func_name] = api_function + if header == DEFAULT_HEADER: + FUNCTIONS[func_name] = api_function FUNCTIONS_BY_HEADER.setdefault(header, {})[func_name] = api_function INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests return unwrapper_raise # used in 'normal' RPython code. 
@@ -794,10 +782,11 @@ # Structure declaration code members = [] structindex = {} - for name, func in sorted(FUNCTIONS.iteritems()): - restype, args = c_function_signature(db, func) - members.append('%s (*%s)(%s);' % (restype, name, args)) - structindex[name] = len(structindex) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + restype, args = c_function_signature(db, func) + members.append('%s (*%s)(%s);' % (restype, name, args)) + structindex[name] = len(structindex) structmembers = '\n'.join(members) struct_declaration_code = """\ struct PyPyAPI { @@ -806,7 +795,8 @@ RPY_EXTERN struct PyPyAPI* pypyAPI = &_pypyAPI; """ % dict(members=structmembers) - functions = generate_decls_and_callbacks(db, export_symbols) + functions = generate_decls_and_callbacks(db, export_symbols, + prefix='cpyexttest') global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): @@ -823,6 +813,11 @@ prologue = ("#include \n" "#include \n" "#include \n") + if use_micronumpy: + prologue = ("#include \n" + "#include \n" + "#include \n" + "#include \n") code = (prologue + struct_declaration_code + global_code + @@ -898,13 +893,19 @@ pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') # implement structure initialization code - for name, func in FUNCTIONS.iteritems(): - if name.startswith('cpyext_'): # XXX hack - continue - pypyAPI[structindex[name]] = ctypes.cast( - ll2ctypes.lltype2ctypes(func.get_llhelper(space)), - ctypes.c_void_p) - + #for name, func in FUNCTIONS.iteritems(): + # if name.startswith('cpyext_'): # XXX hack + # continue + # pypyAPI[structindex[name]] = ctypes.cast( + # ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + # ctypes.c_void_p) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + if name.startswith('cpyext_'): # XXX hack + continue + pypyAPI[structindex[name]] = ctypes.cast( + 
ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + ctypes.c_void_p) setup_va_functions(eci) setup_init_functions(eci, translating=False) @@ -997,18 +998,12 @@ pypy_macros_h = udir.join('pypy_macros.h') pypy_macros_h.write('\n'.join(pypy_macros)) -def generate_decls_and_callbacks(db, export_symbols, api_struct=True): +def generate_decls_and_callbacks(db, export_symbols, api_struct=True, prefix=''): "NOT_RPYTHON" # implement function callbacks and generate function decls functions = [] decls = {} pypy_decls = decls['pypy_decl.h'] = [] - pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#ifndef PYPY_STANDALONE\n") - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("extern \"C\" {") - pypy_decls.append("#endif\n") pypy_decls.append('#define Signed long /* xxx temporary fix */\n') pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') @@ -1018,19 +1013,28 @@ for header_name, header_functions in FUNCTIONS_BY_HEADER.iteritems(): if header_name not in decls: header = decls[header_name] = [] + header.append('#define Signed long /* xxx temporary fix */\n') + header.append('#define Unsigned unsigned long /* xxx temporary fix */\n') else: header = decls[header_name] for name, func in sorted(header_functions.iteritems()): + if header == DEFAULT_HEADER: + _name = name + else: + # this name is not included in pypy_macros.h + _name = mangle_name(prefix, name) + assert _name is not None, 'error converting %s' % name + header.append("#define %s %s" % (name, _name)) restype, args = c_function_signature(db, func) - header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, name, args)) + header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, _name, args)) if api_struct: callargs = ', '.join('arg%d' % (i,) for i in range(len(func.argtypes))) if func.restype is lltype.Void: - body = "{ _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ _pypyAPI.%s(%s); }" % (_name, callargs) else: - body = "{ 
return _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ return _pypyAPI.%s(%s); }" % (_name, callargs) functions.append('%s %s(%s)\n%s' % (restype, name, args, body)) for name in VA_TP_LIST: name_no_star = process_va_name(name) @@ -1047,13 +1051,10 @@ typ = 'PyObject*' pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) - pypy_decls.append('#undef Signed /* xxx temporary fix */\n') - pypy_decls.append('#undef Unsigned /* xxx temporary fix */\n') - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("}") - pypy_decls.append("#endif") - pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") - pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") + for header_name in FUNCTIONS_BY_HEADER.keys(): + header = decls[header_name] + header.append('#undef Signed /* xxx temporary fix */\n') + header.append('#undef Unsigned /* xxx temporary fix */\n') for header_name, header_decls in decls.iteritems(): decl_h = udir.join(header_name) @@ -1162,7 +1163,8 @@ generate_macros(export_symbols, prefix='PyPy') - functions = generate_decls_and_callbacks(db, [], api_struct=False) + functions = generate_decls_and_callbacks(db, [], api_struct=False, + prefix='PyPy') code = "#include \n" + "\n".join(functions) eci = build_eci(False, export_symbols, code) @@ -1204,14 +1206,16 @@ PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **', getter_only=True, declare_as_extern=False) - for name, func in FUNCTIONS.iteritems(): - newname = mangle_name('PyPy', name) or name - deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) - deco(func.get_wrapper(space)) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, + relax=True) + deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include, use_micronumpy) + 
copy_header_files(trunk_include) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -139,7 +139,18 @@ /* Missing definitions */ #include "missing.h" -#include +/* The declarations of most API functions are generated in a separate file */ +/* Don't include them while building PyPy, RPython also generated signatures + * which are similar but not identical. */ +#ifndef PYPY_STANDALONE +#ifdef __cplusplus +extern "C" { +#endif + #include +#ifdef __cplusplus +} +#endif +#endif /* PYPY_STANDALONE */ /* Define macros for inline documentation. */ #define PyDoc_VAR(name) static char name[] diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h deleted file mode 100644 --- a/pypy/module/cpyext/include/numpy/__multiarray_api.h +++ /dev/null @@ -1,10 +0,0 @@ - - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -#define import_array() -#define PyArray_New _PyArray_New - diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -1,8 +1,6 @@ -/* NDArray object interface - S. H. Muller, 2013/07/26 - * It will be copied by numpy/core/setup.py by install_data to - * site-packages/numpy/core/includes/numpy -*/ +/* NDArray object interface - S. H. 
Muller, 2013/07/26 */ +/* For testing ndarrayobject only */ #ifndef Py_NDARRAYOBJECT_H #define Py_NDARRAYOBJECT_H @@ -10,13 +8,8 @@ extern "C" { #endif -#include "old_defines.h" #include "npy_common.h" -#include "__multiarray_api.h" - -#define NPY_UNUSED(x) x -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#include "ndarraytypes.h" /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -24,208 +17,20 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_CopyInto _PyArray_CopyInto +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define NPY_MAXDIMS 32 -#ifndef NDARRAYTYPES_H -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -/* data types copied from numpy/ndarraytypes.h - * keep numbers in sync with micronumpy.interp_dtype.DTypeCache - */ -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - /* - * New 1.6 types appended, may be integrated - * into the above in 2.0. 
- */ - NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, - - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR, /* special flag */ - NPY_USERDEF=256, /* leave room for characters */ - - /* The number of types not including the new 1.6 types */ - NPY_NTYPES_ABI_COMPATIBLE=21 -}; - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) -#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) -#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) -#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) - - -/* flags */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 -#define NPY_ARRAY_OWNDATA 0x0004 -#define NPY_ARRAY_FORCECAST 0x0010 -#define NPY_ARRAY_ENSURECOPY 0x0020 -#define NPY_ARRAY_ENSUREARRAY 0x0040 -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 -#define NPY_ARRAY_ALIGNED 0x0100 -#define NPY_ARRAY_NOTSWAPPED 0x0200 -#define NPY_ARRAY_WRITEABLE 0x0400 -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY 
(NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -#define NPY_FARRAY NPY_ARRAY_FARRAY -#define NPY_CARRAY NPY_ARRAY_CARRAY - -#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) - -#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) - -#define PyArray_ISONESEGMENT(arr) (1) -#define PyArray_ISNOTSWAPPED(arr) (1) -#define PyArray_ISBYTESWAPPED(arr) (0) - -#endif - -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CFLOAT -#define NPY_COMPLEX64 NPY_CDOUBLE - - -/* functions */ -#ifndef PyArray_NDIM - -#define PyArray_Check _PyArray_Check -#define PyArray_CheckExact 
_PyArray_CheckExact -#define PyArray_FLAGS _PyArray_FLAGS - -#define PyArray_NDIM _PyArray_NDIM -#define PyArray_DIM _PyArray_DIM -#define PyArray_STRIDE _PyArray_STRIDE -#define PyArray_SIZE _PyArray_SIZE -#define PyArray_ITEMSIZE _PyArray_ITEMSIZE -#define PyArray_NBYTES _PyArray_NBYTES -#define PyArray_TYPE _PyArray_TYPE -#define PyArray_DATA _PyArray_DATA - -#define PyArray_Size PyArray_SIZE -#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) - -#define PyArray_FromAny _PyArray_FromAny -#define PyArray_FromObject _PyArray_FromObject -#define PyArray_ContiguousFromObject PyArray_FromObject -#define PyArray_ContiguousFromAny PyArray_FromObject - -#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) -#define PyArray_FROM_OTF(obj, typenum, requirements) \ - PyArray_FromObject(obj, typenum, 0, 0) - -#define PyArray_New _PyArray_New -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData -#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning - -#define PyArray_EMPTY(nd, dims, type_num, fortran) \ - PyArray_SimpleNew(nd, dims, type_num) +/* functions defined in ndarrayobject.c*/ PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); -#define PyArray_FILLWBYTE _PyArray_FILLWBYTE -#define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto -#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) - -/* Don't use these in loops! 
*/ - -#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0))) - -#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1))) - -#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ - (k)*PyArray_STRIDE(obj,2))) - -#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ - (k)*PyArray_STRIDE(obj,2) + \ - (l)*PyArray_STRIDE(obj,3))) - -#endif #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h --- a/pypy/module/cpyext/include/numpy/ndarraytypes.h +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -1,69 +1,9 @@ #ifndef NDARRAYTYPES_H #define NDARRAYTYPES_H +/* For testing ndarrayobject only */ + #include "numpy/npy_common.h" -//#include "npy_endian.h" -//#include "npy_cpu.h" -//#include "utils.h" - -//for pypy - numpy has lots of typedefs -//for pypy - make life easier, less backward support -#define NPY_1_8_API_VERSION 0x00000008 -#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION -#undef NPY_1_8_API_VERSION - -#define NPY_ENABLE_SEPARATE_COMPILATION 1 -#define NPY_VISIBILITY_HIDDEN - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -#else - #define NPY_NO_EXPORT static -#endif - -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP - #define NPY_ALLOW_THREADS 1 -#else - #define NPY_ALLOW_THREADS 0 -#endif - - - -/* - * There are several places in the code where an array of dimensions - * is allocated statically. This is the size of that static - * allocation. 
- * - * The array creation itself could have arbitrary dimensions but all - * the places where static allocation is used would need to be changed - * to dynamic (including inside of several structures) - */ - -#define NPY_MAXDIMS 32 -#define NPY_MAXARGS 32 - -/* Used for Converter Functions "O&" code in ParseTuple */ -#define NPY_FAIL 0 -#define NPY_SUCCEED 1 - -/* - * Binary compatibility version number. This number is increased - * whenever the C-API is changed such that binary compatibility is - * broken, i.e. whenever a recompile of extension modules is needed. - */ -#define NPY_VERSION NPY_ABI_VERSION - -/* - * Minor API version. This number is increased whenever a change is - * made to the C-API -- whether it breaks binary compatibility or not. - * Some changes, such as adding a function pointer to the end of the - * function table, can be made without breaking binary compatibility. - * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) - * would be increased. Whenever binary compatibility is broken, both - * NPY_VERSION and NPY_FEATURE_VERSION should be increased. 
- */ -#define NPY_FEATURE_VERSION NPY_API_VERSION enum NPY_TYPES { NPY_BOOL=0, NPY_BYTE, NPY_UBYTE, @@ -91,18 +31,6 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; -/* basetype array priority */ -#define NPY_PRIORITY 0.0 - -/* default subtype priority */ -#define NPY_SUBTYPE_PRIORITY 1.0 - -/* default scalar priority */ -#define NPY_SCALAR_PRIORITY -1000000.0 - -/* How many floating point types are there (excluding half) */ -#define NPY_NUM_FLOATTYPE 3 - /* * These characters correspond to the array type and the struct * module @@ -157,27 +85,6 @@ }; typedef enum { - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2 -} NPY_SORTKIND; -#define NPY_NSORTS (NPY_MERGESORT + 1) - - -typedef enum { - NPY_INTROSELECT=0, -} NPY_SELECTKIND; -#define NPY_NSELECTS (NPY_INTROSELECT + 1) - - -typedef enum { - NPY_SEARCHLEFT=0, - NPY_SEARCHRIGHT=1 -} NPY_SEARCHSIDE; -#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) - - -typedef enum { NPY_NOSCALAR=-1, NPY_BOOL_SCALAR, NPY_INTPOS_SCALAR, @@ -186,7 +93,6 @@ NPY_COMPLEX_SCALAR, NPY_OBJECT_SCALAR } NPY_SCALARKIND; -#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) /* For specifying array memory layout or iteration order */ typedef enum { @@ -200,729 +106,6 @@ NPY_KEEPORDER=2 } NPY_ORDER; -/* For specifying allowed casting in operations which support it */ -typedef enum { - /* Only allow identical types */ - NPY_NO_CASTING=0, - /* Allow identical and byte swapped types */ - NPY_EQUIV_CASTING=1, - /* Only allow safe casts */ - NPY_SAFE_CASTING=2, - /* Allow safe casts or casts within the same kind */ - NPY_SAME_KIND_CASTING=3, - /* Allow any casts */ - NPY_UNSAFE_CASTING=4, - - /* - * Temporary internal definition only, will be removed in upcoming - * release, see below - * */ - NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, -} NPY_CASTING; - -typedef enum { - NPY_CLIP=0, - NPY_WRAP=1, - NPY_RAISE=2 -} NPY_CLIPMODE; - -/* The special not-a-time (NaT) value */ -#define NPY_DATETIME_NAT NPY_MIN_INT64 - -/* - * Upper bound on the length 
of a DATETIME ISO 8601 string - * YEAR: 21 (64-bit year) - * MONTH: 3 - * DAY: 3 - * HOURS: 3 - * MINUTES: 3 - * SECONDS: 3 - * ATTOSECONDS: 1 + 3*6 - * TIMEZONE: 5 - * NULL TERMINATOR: 1 - */ -#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) - -typedef enum { - NPY_FR_Y = 0, /* Years */ - NPY_FR_M = 1, /* Months */ - NPY_FR_W = 2, /* Weeks */ - /* Gap where 1.6 NPY_FR_B (value 3) was */ - NPY_FR_D = 4, /* Days */ - NPY_FR_h = 5, /* hours */ - NPY_FR_m = 6, /* minutes */ - NPY_FR_s = 7, /* seconds */ - NPY_FR_ms = 8, /* milliseconds */ - NPY_FR_us = 9, /* microseconds */ - NPY_FR_ns = 10,/* nanoseconds */ - NPY_FR_ps = 11,/* picoseconds */ - NPY_FR_fs = 12,/* femtoseconds */ - NPY_FR_as = 13,/* attoseconds */ - NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ -} NPY_DATETIMEUNIT; - -/* - * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS - * is technically one more than the actual number of units. - */ -#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) -#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC - -/* - * Business day conventions for mapping invalid business - * days to valid business days. - */ -typedef enum { - /* Go forward in time to the following business day. */ - NPY_BUSDAY_FORWARD, - NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, - /* Go backward in time to the preceding business day. */ - NPY_BUSDAY_BACKWARD, - NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, - /* - * Go forward in time to the following business day, unless it - * crosses a month boundary, in which case go backward - */ - NPY_BUSDAY_MODIFIEDFOLLOWING, - /* - * Go backward in time to the preceding business day, unless it - * crosses a month boundary, in which case go forward. - */ - NPY_BUSDAY_MODIFIEDPRECEDING, - /* Produce a NaT for non-business days. */ - NPY_BUSDAY_NAT, - /* Raise an exception for non-business days. 
*/ - NPY_BUSDAY_RAISE -} NPY_BUSDAY_ROLL; - -/************************************************************ - * NumPy Auxiliary Data for inner loops, sort functions, etc. - ************************************************************/ - -/* - * When creating an auxiliary data struct, this should always appear - * as the first member, like this: - * - * typedef struct { - * NpyAuxData base; - * double constant; - * } constant_multiplier_aux_data; - */ -typedef struct NpyAuxData_tag NpyAuxData; - -/* Function pointers for freeing or cloning auxiliary data */ -typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); -typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); - -struct NpyAuxData_tag { - NpyAuxData_FreeFunc *free; - NpyAuxData_CloneFunc *clone; - /* To allow for a bit of expansion without breaking the ABI */ - void *reserved[2]; -}; - -/* Macros to use for freeing and cloning auxiliary data */ -#define NPY_AUXDATA_FREE(auxdata) \ - do { \ - if ((auxdata) != NULL) { \ - (auxdata)->free(auxdata); \ - } \ - } while(0) -#define NPY_AUXDATA_CLONE(auxdata) \ - ((auxdata)->clone(auxdata)) - -#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); -#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); - -#define NPY_STRINGIFY(x) #x -#define NPY_TOSTRING(x) NPY_STRINGIFY(x) - - /* - * Macros to define how array, and dimension/strides data is - * allocated. 
- */ - - /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ - -#define NPY_USE_PYMEM 1 - -#if NPY_USE_PYMEM == 1 -#define PyArray_malloc PyMem_Malloc -#define PyArray_free PyMem_Free -#define PyArray_realloc PyMem_Realloc -#else -#define PyArray_malloc malloc -#define PyArray_free free -#define PyArray_realloc realloc -#endif - -/* Dimensions and strides */ -#define PyDimMem_NEW(size) \ - ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) - -#define PyDimMem_FREE(ptr) PyArray_free(ptr) - -#define PyDimMem_RENEW(ptr,size) \ - ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) - -/* forward declaration */ -struct _PyArray_Descr; - -/* These must deal with unaligned and swapped data if necessary */ -typedef PyObject * (PyArray_GetItemFunc) (void *, void *); -typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); - -typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, - npy_intp, int, void *); - -typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); -typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); - - -/* - * These assume aligned and notswapped data -- a buffer will be used - * before or contiguous data will be obtained - */ - -typedef int (PyArray_CompareFunc)(const void *, const void *, void *); -typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); - -typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, - npy_intp, void *); - -typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, - void *); - -/* - * XXX the ignore argument should be removed next time the API version - * is bumped. It used to be the separator. 
- */ -typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, - char *ignore, struct _PyArray_Descr *); -typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, - struct _PyArray_Descr *); - -typedef int (PyArray_FillFunc)(void *, npy_intp, void *); - -typedef int (PyArray_SortFunc)(void *, npy_intp, void *); -typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); -typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); -typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); - -typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); - -typedef int (PyArray_ScalarKindFunc)(void *); - -typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, - void *max, void *out); -typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, - void *values, npy_intp nv); -typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, - npy_intp nindarray, npy_intp n_outer, - npy_intp m_middle, npy_intp nelem, - NPY_CLIPMODE clipmode); - -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -typedef struct { - /* - * Functions to cast to most other standard types - * Can have some NULL entries. The types - * DATETIME, TIMEDELTA, and HALF go into the castdict - * even though they are built-in. - */ - PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; - - /* The next four functions *cannot* be NULL */ - - /* - * Functions to get and set items with standard Python types - * -- not array scalars - */ - PyArray_GetItemFunc *getitem; - PyArray_SetItemFunc *setitem; - - /* - * Copy and/or swap data. 
Memory areas may not overlap - * Use memmove first if they might - */ - PyArray_CopySwapNFunc *copyswapn; - PyArray_CopySwapFunc *copyswap; - - /* - * Function to compare items - * Can be NULL - */ - PyArray_CompareFunc *compare; - - /* - * Function to select largest - * Can be NULL - */ - PyArray_ArgFunc *argmax; - - /* - * Function to compute dot product - * Can be NULL - */ - PyArray_DotFunc *dotfunc; - - /* - * Function to scan an ASCII file and - * place a single value plus possible separator - * Can be NULL - */ - PyArray_ScanFunc *scanfunc; - - /* - * Function to read a single value from a string - * and adjust the pointer; Can be NULL - */ - PyArray_FromStrFunc *fromstr; - - /* - * Function to determine if data is zero or not - * If NULL a default version is - * used at Registration time. - */ - PyArray_NonzeroFunc *nonzero; - - /* - * Used for arange. - * Can be NULL. - */ - PyArray_FillFunc *fill; - - /* - * Function to fill arrays with scalar values - * Can be NULL - */ - PyArray_FillWithScalarFunc *fillwithscalar; - - /* - * Sorting functions - * Can be NULL - */ - PyArray_SortFunc *sort[NPY_NSORTS]; - PyArray_ArgSortFunc *argsort[NPY_NSORTS]; - - /* - * Dictionary of additional casting functions - * PyArray_VectorUnaryFuncs - * which can be populated to support casting - * to other registered types. Can be NULL - */ - PyObject *castdict; - - /* - * Functions useful for generalizing - * the casting rules. - * Can be NULL; - */ - PyArray_ScalarKindFunc *scalarkind; - int **cancastscalarkindto; - int *cancastto; - - PyArray_FastClipFunc *fastclip; - PyArray_FastPutmaskFunc *fastputmask; - PyArray_FastTakeFunc *fasttake; - - /* - * Function to select smallest - * Can be NULL - */ - PyArray_ArgFunc *argmin; - -} PyArray_ArrFuncs; - -/* The item must be reference counted when it is inserted or extracted. 
*/ -#define NPY_ITEM_REFCOUNT 0x01 -/* Same as needing REFCOUNT */ -#define NPY_ITEM_HASOBJECT 0x01 -/* Convert to list for pickling */ -#define NPY_LIST_PICKLE 0x02 -/* The item is a POINTER */ -#define NPY_ITEM_IS_POINTER 0x04 -/* memory needs to be initialized for this data-type */ -#define NPY_NEEDS_INIT 0x08 -/* operations need Python C-API so don't give-up thread. */ -#define NPY_NEEDS_PYAPI 0x10 -/* Use f.getitem when extracting elements of this data-type */ -#define NPY_USE_GETITEM 0x20 -/* Use f.setitem when setting creating 0-d array from this data-type.*/ -#define NPY_USE_SETITEM 0x40 -/* A sticky flag specifically for structured arrays */ -#define NPY_ALIGNED_STRUCT 0x80 - -/* - *These are inherited for global data-type if any data-types in the - * field have them - */ -#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ - NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) - -#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ - NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ - NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) - -#define PyDataType_FLAGCHK(dtype, flag) \ - (((dtype)->flags & (flag)) == (flag)) - -#define PyDataType_REFCHK(dtype) \ - PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) - -typedef struct _PyArray_Descr { - PyObject_HEAD - /* - * the type object representing an - * instance of this type -- should not - * be two type_numbers with the same type - * object. - */ - PyTypeObject *typeobj; - /* kind for this type */ - char kind; - /* unique-character representing this type */ - char type; - /* - * '>' (big), '<' (little), '|' - * (not-applicable), or '=' (native). 
- */ - char byteorder; - /* flags describing data type */ - char flags; - /* number representing this type */ - int type_num; - /* element size (itemsize) for this type */ - int elsize; - /* alignment needed for this type */ - int alignment; - /* - * Non-NULL if this type is - * is an array (C-contiguous) - * of some other type - */ - struct _arr_descr *subarray; - /* - * The fields dictionary for this type - * For statically defined descr this - * is always Py_None - */ - PyObject *fields; - /* - * An ordered tuple of field names or NULL - * if no fields are defined - */ - PyObject *names; - /* - * a table of functions specific for each - * basic data descriptor - */ - PyArray_ArrFuncs *f; - /* Metadata about this dtype */ - PyObject *metadata; - /* - * Metadata specific to the C implementation - * of the particular dtype. This was added - * for NumPy 1.7.0. - */ - NpyAuxData *c_metadata; -} PyArray_Descr; - -typedef struct _arr_descr { - PyArray_Descr *base; - PyObject *shape; /* a tuple */ -} PyArray_ArrayDescr; - -/* - * The main array object structure. - * - * It has been recommended to use the inline functions defined below - * (PyArray_DATA and friends) to access fields here for a number of - * releases. Direct access to the members themselves is deprecated. - * To ensure that your code does not use deprecated access, - * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION - * (or NPY_1_8_API_VERSION or higher as required). - */ -/* This struct will be moved to a private header in a future release */ -typedef struct tagPyArrayObject_fields { - PyObject_HEAD - /* Pointer to the raw data buffer */ - char *data; - /* The number of dimensions, also called 'ndim' */ - int nd; - /* The size in each dimension, also called 'shape' */ - npy_intp *dimensions; - /* - * Number of bytes to jump to get to the - * next element in each dimension - */ - npy_intp *strides; - /* - * This object is decref'd upon - * deletion of array. 
Except in the - * case of UPDATEIFCOPY which has - * special handling. - * - * For views it points to the original - * array, collapsed so no chains of - * views occur. - * - * For creation from buffer object it - * points to an object that shold be - * decref'd on deletion - * - * For UPDATEIFCOPY flag this is an - * array to-be-updated upon deletion - * of this one - */ - PyObject *base; - /* Pointer to type structure */ - PyArray_Descr *descr; - /* Flags describing array -- see below */ - int flags; - /* For weak references */ - PyObject *weakreflist; -} PyArrayObject_fields; - -/* - * To hide the implementation details, we only expose - * the Python struct HEAD. - */ -#if !defined(NPY_NO_DEPRECATED_API) || \ - (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) -/* - * Can't put this in npy_deprecated_api.h like the others. - * PyArrayObject field access is deprecated as of NumPy 1.7. - */ -typedef PyArrayObject_fields PyArrayObject; -#else -typedef struct tagPyArrayObject { - PyObject_HEAD -} PyArrayObject; -#endif - -#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) - -/* Array Flags Object */ -typedef struct PyArrayFlagsObject { - PyObject_HEAD - PyObject *arr; - int flags; -} PyArrayFlagsObject; - -/* Mirrors buffer object to ptr */ - -typedef struct { - PyObject_HEAD - PyObject *base; - void *ptr; - npy_intp len; - int flags; -} PyArray_Chunk; - -typedef struct { - NPY_DATETIMEUNIT base; - int num; -} PyArray_DatetimeMetaData; - -typedef struct { - NpyAuxData base; - PyArray_DatetimeMetaData meta; -} PyArray_DatetimeDTypeMetaData; - -/* - * This structure contains an exploded view of a date-time value. - * NaT is represented by year == NPY_DATETIME_NAT. - */ -typedef struct { - npy_int64 year; - npy_int32 month, day, hour, min, sec, us, ps, as; -} npy_datetimestruct; - -/* This is not used internally. 
*/ -typedef struct { - npy_int64 day; - npy_int32 sec, us, ps, as; -} npy_timedeltastruct; - -typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); - -/* - * Means c-style contiguous (last index varies the fastest). The data - * elements right after each other. - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 - -/* - * Set if array is a contiguous Fortran array: the first index varies - * the fastest in memory (strides array is reverse of C-contiguous - * array) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 - -/* - * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a - * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with - * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS - * at the same time if they have either zero or one element. - * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional - * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements - * and the array is contiguous if ndarray.squeeze() is contiguous. - * I.e. dimensions for which `ndarray.shape[dimension] == 1` are - * ignored. - */ - -/* - * If set, the array owns the data: it will be free'd when the array - * is deleted. - * - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_OWNDATA 0x0004 - -/* - * An array never has the next four set; they're only used as parameter - * flags to the the various FromAny functions - * - * This flag may be requested in constructor functions. - */ - -/* Cause a cast to occur regardless of whether or not it is safe. */ -#define NPY_ARRAY_FORCECAST 0x0010 - -/* - * Always copy the array. Returned arrays are always CONTIGUOUS, - * ALIGNED, and WRITEABLE. - * - * This flag may be requested in constructor functions. 
- */ -#define NPY_ARRAY_ENSURECOPY 0x0020 - -/* - * Make sure the returned array is a base-class ndarray - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ENSUREARRAY 0x0040 - -/* - * Make sure that the strides are in units of the element size Needed - * for some operations with record-arrays. - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 - -/* - * Array data is aligned on the appropiate memory address for the type - * stored according to how the compiler would align things (e.g., an - * array of integers (4 bytes each) starts on a memory address that's - * a multiple of 4) - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_ALIGNED 0x0100 - -/* - * Array data has the native endianness - * - * This flag may be requested in constructor functions. - */ -#define NPY_ARRAY_NOTSWAPPED 0x0200 - -/* - * Array data is writeable - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_WRITEABLE 0x0400 - -/* - * If this flag is set, then base contains a pointer to an array of - * the same size that should be updated with the current contents of - * this array when this array is deallocated - * - * This flag may be requested in constructor functions. - * This flag may be tested for in PyArray_FLAGS(arr). - */ -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 - -/* - * NOTE: there are also internal flags defined in multiarray/arrayobject.h, - * which start at bit 31 and work down. 
- */ - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -/* This flag is for the array interface, not PyArrayObject */ -#define NPY_ARR_HAS_DESCR 0x0800 - - - - -/* - * Size of internal buffers used for alignment Make BUFSIZE a multiple - * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned - */ -#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) -#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) -#define NPY_BUFSIZE 8192 -/* buffer stress test size: */ -/*#define NPY_BUFSIZE 17*/ - -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) -#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ - ((p).real < (q).real))) -#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ - ((p).real > (q).real))) -#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ - ((p).real <= (q).real))) -#define PyArray_CGE(p,q) ((((p).real==(q).real) ? 
((p).imag >= (q).imag) : \ - ((p).real >= (q).real))) -#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) -#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) /* * C API: consists of Macros and functions. The MACROS are defined @@ -937,850 +120,4 @@ #define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) #define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) -#if NPY_ALLOW_THREADS -#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; -#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); -#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ - { _save = PyEval_SaveThread();} } while (0); - -#define NPY_BEGIN_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_BEGIN_THREADS;} while (0); - -#define NPY_END_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_END_THREADS; } while (0); - -#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; -#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); -#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); -#else -#define NPY_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF -#define NPY_BEGIN_THREADS -#define NPY_END_THREADS -#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) -#define NPY_BEGIN_THREADS_DESCR(dtype) -#define NPY_END_THREADS_DESCR(dtype) -#define NPY_ALLOW_C_API_DEF -#define NPY_ALLOW_C_API -#define NPY_DISABLE_C_API -#endif - -/********************************** - * The nditer object, added in 1.6 - **********************************/ - -/* The actual structure of the iterator is an internal detail */ -typedef struct NpyIter_InternalOnly NpyIter; - -/* 
Iterator function pointers that may be specialized */ -typedef int (NpyIter_IterNextFunc)(NpyIter *iter); -typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, - npy_intp *outcoords); - -/*** Global flags that may be passed to the iterator constructors ***/ - -/* Track an index representing C order */ -#define NPY_ITER_C_INDEX 0x00000001 -/* Track an index representing Fortran order */ -#define NPY_ITER_F_INDEX 0x00000002 -/* Track a multi-index */ -#define NPY_ITER_MULTI_INDEX 0x00000004 -/* User code external to the iterator does the 1-dimensional innermost loop */ -#define NPY_ITER_EXTERNAL_LOOP 0x00000008 -/* Convert all the operands to a common data type */ -#define NPY_ITER_COMMON_DTYPE 0x00000010 -/* Operands may hold references, requiring API access during iteration */ -#define NPY_ITER_REFS_OK 0x00000020 -/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ -#define NPY_ITER_ZEROSIZE_OK 0x00000040 -/* Permits reductions (size-0 stride with dimension size > 1) */ -#define NPY_ITER_REDUCE_OK 0x00000080 -/* Enables sub-range iteration */ -#define NPY_ITER_RANGED 0x00000100 -/* Enables buffering */ -#define NPY_ITER_BUFFERED 0x00000200 -/* When buffering is enabled, grows the inner loop if possible */ -#define NPY_ITER_GROWINNER 0x00000400 -/* Delay allocation of buffers until first Reset* call */ -#define NPY_ITER_DELAY_BUFALLOC 0x00000800 -/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ -#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 - -/*** Per-operand flags that may be passed to the iterator constructors ***/ - -/* The operand will be read from and written to */ -#define NPY_ITER_READWRITE 0x00010000 -/* The operand will only be read from */ -#define NPY_ITER_READONLY 0x00020000 -/* The operand will only be written to */ -#define NPY_ITER_WRITEONLY 0x00040000 -/* The operand's data must be in native byte order */ -#define NPY_ITER_NBO 0x00080000 -/* The operand's data must be aligned */ -#define 
NPY_ITER_ALIGNED 0x00100000 -/* The operand's data must be contiguous (within the inner loop) */ -#define NPY_ITER_CONTIG 0x00200000 -/* The operand may be copied to satisfy requirements */ -#define NPY_ITER_COPY 0x00400000 -/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ -#define NPY_ITER_UPDATEIFCOPY 0x00800000 -/* Allocate the operand if it is NULL */ -#define NPY_ITER_ALLOCATE 0x01000000 -/* If an operand is allocated, don't use any subtype */ -#define NPY_ITER_NO_SUBTYPE 0x02000000 -/* This is a virtual array slot, operand is NULL but temporary data is there */ -#define NPY_ITER_VIRTUAL 0x04000000 -/* Require that the dimension match the iterator dimensions exactly */ -#define NPY_ITER_NO_BROADCAST 0x08000000 -/* A mask is being used on this array, affects buffer -> array copy */ -#define NPY_ITER_WRITEMASKED 0x10000000 -/* This array is the mask for all WRITEMASKED operands */ -#define NPY_ITER_ARRAYMASK 0x20000000 - -#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff -#define NPY_ITER_PER_OP_FLAGS 0xffff0000 - - -/***************************** - * Basic iterator object - *****************************/ - -/* FWD declaration */ -typedef struct PyArrayIterObject_tag PyArrayIterObject; - -/* - * type of the function which translates a set of coordinates to a - * pointer to the data - */ -typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); - -struct PyArrayIterObject_tag { - PyObject_HEAD - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp 
limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; -} ; - - -/* Iterator API */ -#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) - -#define _PyAIT(it) ((PyArrayIterObject *)(it)) -#define PyArray_ITER_RESET(it) do { \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - memset(_PyAIT(it)->coordinates, 0, \ - (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ -} while (0) - -#define _PyArray_ITER_NEXT1(it) do { \ - (it)->dataptr += _PyAIT(it)->strides[0]; \ - (it)->coordinates[0]++; \ -} while (0) - -#define _PyArray_ITER_NEXT2(it) do { \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ -} while (0) - -#define _PyArray_ITER_NEXT3(it) do { \ - if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ - (it)->coordinates[2]++; \ - (it)->dataptr += (it)->strides[2]; \ - } \ - else { \ - (it)->coordinates[2] = 0; \ - (it)->dataptr -= (it)->backstrides[2]; \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] \ - (it)->backstrides[1]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_NEXT(it) do { \ - _PyAIT(it)->index++; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyArray_ITER_NEXT1(_PyAIT(it)); \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else if (_PyAIT(it)->nd_m1 == 1) { \ - _PyArray_ITER_NEXT2(_PyAIT(it)); \ - } \ - else { \ - int __npy_i; \ - for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ - if (_PyAIT(it)->coordinates[__npy_i] < \ - _PyAIT(it)->dims_m1[__npy_i]) { \ - _PyAIT(it)->coordinates[__npy_i]++; \ - _PyAIT(it)->dataptr += \ - 
_PyAIT(it)->strides[__npy_i]; \ - break; \ - } \ - else { \ - _PyAIT(it)->coordinates[__npy_i] = 0; \ - _PyAIT(it)->dataptr -= \ - _PyAIT(it)->backstrides[__npy_i]; \ - } \ - } \ - } \ -} while (0) - -#define PyArray_ITER_GOTO(it, destination) do { \ - int __npy_i; \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ - if (destination[__npy_i] < 0) { \ - destination[__npy_i] += \ - _PyAIT(it)->dims_m1[__npy_i]+1; \ - } \ - _PyAIT(it)->dataptr += destination[__npy_i] * \ - _PyAIT(it)->strides[__npy_i]; \ - _PyAIT(it)->coordinates[__npy_i] = \ - destination[__npy_i]; \ - _PyAIT(it)->index += destination[__npy_i] * \ - ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ - _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ - } \ -} while (0) - -#define PyArray_ITER_GOTO1D(it, ind) do { \ - int __npy_i; \ - npy_intp __npy_ind = (npy_intp) (ind); \ - if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ - _PyAIT(it)->index = __npy_ind; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * _PyAIT(it)->strides[0]; \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ - __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ - else { \ - _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ - for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ - __npy_i++) { \ - _PyAIT(it)->dataptr += \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ - * _PyAIT(it)->strides[__npy_i]; \ - __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ - } \ - } \ -} while (0) - -#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) - -#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) - - -/* - * Any object passed to PyArray_Broadcast must be binary compatible - * with this structure. 
- */ - -typedef struct { - PyObject_HEAD - int numiter; /* number of iters */ - npy_intp size; /* broadcasted size */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ -} PyArrayMultiIterObject; - -#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) -#define PyArray_MultiIter_RESET(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index = 0; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} while (0) - -#define PyArray_MultiIter_NEXT(multi) do { \ - int __npy_mi; \ - _PyMIT(multi)->index++; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ From pypy.commits at gmail.com Fri Apr 15 13:10:38 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Apr 2016 10:10:38 -0700 (PDT) Subject: [pypy-commit] cffi default: Also accept arrays of int8_t or uint8_t, like ffi.string() Message-ID: <5711208e.2179c20a.cd842.2ba8@mx.google.com> Author: Armin Rigo Branch: Changeset: r2660:89ba430d6c58 Date: 2016-04-15 19:11 +0200 http://bitbucket.org/cffi/cffi/changeset/89ba430d6c58/ Log: Also accept arrays of int8_t or uint8_t, like ffi.string() diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5585,6 +5585,7 @@ static PyObject *b_rawstring(PyObject *self, PyObject *arg) { CDataObject *cd; + CTypeDescrObject *ctitem; Py_ssize_t length; if (!CData_Check(arg)) { @@ -5592,23 +5593,25 @@ return NULL; } cd = (CDataObject *)arg; - if (!(cd->c_type->ct_flags & CT_ARRAY) || - !(cd->c_type->ct_itemdescr->ct_flags & CT_PRIMITIVE_CHAR)) { - PyErr_Format(PyExc_TypeError, - "expected an array of 'char' or 'wchar_t', got '%s'", - cd->c_type->ct_name); - return NULL; - } - - length = get_array_length(cd); - if (cd->c_type->ct_itemdescr->ct_size == sizeof(char)) - return PyBytes_FromStringAndSize(cd->c_data, 
length); + ctitem = cd->c_type->ct_itemdescr; + if ((cd->c_type->ct_flags & CT_ARRAY) && + (ctitem->ct_flags & (CT_PRIMITIVE_CHAR | + CT_PRIMITIVE_SIGNED | + CT_PRIMITIVE_UNSIGNED))) { + length = get_array_length(cd); + if (ctitem->ct_size == sizeof(char)) + return PyBytes_FromStringAndSize(cd->c_data, length); #ifdef HAVE_WCHAR_H - else if (cd->c_type->ct_itemdescr->ct_size == sizeof(wchar_t)) - return _my_PyUnicode_FromWideChar((wchar_t *)cd->c_data, length); + else if (ctitem->ct_flags & CT_PRIMITIVE_CHAR) { + assert(ctitem->ct_size == sizeof(wchar_t)); + return _my_PyUnicode_FromWideChar((wchar_t *)cd->c_data, length); + } #endif - - PyErr_SetString(PyExc_SystemError, "bad size for char-like"); + } + PyErr_Format(PyExc_TypeError, + "expected a 'char[]' or 'uint8_t[]' or 'int8_t[]' " + "or 'wchar_t[]', got '%s'", + cd->c_type->ct_name); return NULL; } diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3537,6 +3537,10 @@ p = newp(BArray, u"abc\x00def") assert rawstring(p) == u"abc\x00def\x00\x00\x00" assert rawstring(p[1:6]) == u"bc\x00de" + BChar = new_primitive_type("uint8_t") + BArray = new_array_type(new_pointer_type(BChar), 10) # uint8_t[10] + p = newp(BArray, [65 + i for i in range(10)]) + assert rawstring(p) == "ABCDEFGHIJ" # py.test.raises(TypeError, rawstring, "foobar") py.test.raises(TypeError, rawstring, p + 1) From pypy.commits at gmail.com Fri Apr 15 13:29:09 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Apr 2016 10:29:09 -0700 (PDT) Subject: [pypy-commit] pypy default: Add ffi.rawstring() Message-ID: <571124e5.522e1c0a.a6252.77a4@mx.google.com> Author: Armin Rigo Branch: Changeset: r83691:556303cdfd09 Date: 2016-04-15 19:29 +0200 http://bitbucket.org/pypy/pypy/changeset/556303cdfd09/ Log: Add ffi.rawstring() diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -48,6 +48,7 @@ 
'from_buffer': 'func.from_buffer', 'string': 'func.string', + 'rawstring': 'func.rawstring', 'buffer': 'cbuffer.buffer', 'memmove': 'func.memmove', diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -7,11 +7,12 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import ovfcheck from pypy.module._cffi_backend import cdataobj from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray +from pypy.module._cffi_backend import ctypeprim class W_CTypeArray(W_CTypePtrOrArray): @@ -108,6 +109,21 @@ def typeoffsetof_index(self, index): return self.ctptr.typeoffsetof_index(index) + def rawstring(self, w_cdata): + if isinstance(self.ctitem, ctypeprim.W_CTypePrimitive): + space = self.space + length = w_cdata.get_array_length() + if self.ctitem.size == rffi.sizeof(lltype.Char): + with w_cdata as ptr: + s = rffi.charpsize2str(ptr, length) + return space.wrapbytes(s) + elif self.is_unichar_ptr_or_array(): + with w_cdata as ptr: + cdata = rffi.cast(rffi.CWCHARP, ptr) + u = rffi.wcharpsize2unicode(cdata, length) + return space.wrap(u) + return W_CTypePtrOrArray.rawstring(self, w_cdata) + class W_CDataIter(W_Root): _immutable_fields_ = ['ctitem', 'cdata', '_stop'] # but not '_next' diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -127,6 +127,12 @@ raise oefmt(space.w_TypeError, "string(): unexpected cdata '%s' argument", self.name) + def rawstring(self, cdataobj): + space = self.space + raise oefmt(space.w_TypeError, + "expected a 'char[]' or 'uint8_t[]' or 'int8_t[]' " + "or 'wchar_t[]', got '%s'", self.name) + def add(self, cdata, i): 
space = self.space raise oefmt(space.w_TypeError, "cannot add a cdata '%s' and a number", diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -542,6 +542,21 @@ return w_cdata.ctype.string(w_cdata, maxlen) + @unwrap_spec(w_cdata=W_CData) + def descr_rawstring(self, w_cdata): + """\ +Convert a cdata that is an array of 'char' or 'wchar_t' to +a byte or unicode string. Unlike ffi.string(), it does not stop +at the first null. + +Note that if you have a pointer and an explicit length, you +can use 'p[0:length]' to make an array view. This is similar to +the construct 'list(p[0:length])', which returns a list of chars/ +unichars/ints/floats.""" + # + return w_cdata.ctype.rawstring(w_cdata) + + def descr_sizeof(self, w_arg): """\ Return the size in bytes of the argument. @@ -736,6 +751,7 @@ new_allocator = interp2app(W_FFIObject.descr_new_allocator), new_handle = interp2app(W_FFIObject.descr_new_handle), offsetof = interp2app(W_FFIObject.descr_offsetof), + rawstring = interp2app(W_FFIObject.descr_rawstring), sizeof = interp2app(W_FFIObject.descr_sizeof), string = interp2app(W_FFIObject.descr_string), typeof = interp2app(W_FFIObject.descr_typeof), diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -78,6 +78,12 @@ # ____________________________________________________________ + at unwrap_spec(w_cdata=cdataobj.W_CData) +def rawstring(space, w_cdata): + return w_cdata.ctype.rawstring(w_cdata) + +# ____________________________________________________________ + def _get_types(space): return space.newtuple([space.gettypefor(cdataobj.W_CData), space.gettypefor(ctypeobj.W_CType)]) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- 
a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3514,3 +3514,22 @@ d = {} _get_common_types(d) assert d['bool'] == '_Bool' + +def test_rawstring(): + BChar = new_primitive_type("char") + BArray = new_array_type(new_pointer_type(BChar), 10) # char[10] + p = newp(BArray, "abc\x00def") + assert rawstring(p) == "abc\x00def\x00\x00\x00" + assert rawstring(p[1:6]) == "bc\x00de" + BWChar = new_primitive_type("wchar_t") + BArray = new_array_type(new_pointer_type(BWChar), 10) # wchar_t[10] + p = newp(BArray, u"abc\x00def") + assert rawstring(p) == u"abc\x00def\x00\x00\x00" + assert rawstring(p[1:6]) == u"bc\x00de" + BChar = new_primitive_type("uint8_t") + BArray = new_array_type(new_pointer_type(BChar), 10) # uint8_t[10] + p = newp(BArray, [65 + i for i in range(10)]) + assert rawstring(p) == "ABCDEFGHIJ" + # + py.test.raises(TypeError, rawstring, "foobar") + py.test.raises(TypeError, rawstring, p + 1) diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -476,3 +476,16 @@ for i in range(5): raises(ValueError, ffi.init_once, do_init, "tag") assert seen == [1] * (i + 1) + + def test_rawstring(self): + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + p = ffi.new("char[]", "abc\x00def") + assert ffi.rawstring(p) == "abc\x00def\x00" + assert ffi.rawstring(p[1:6]) == "bc\x00de" + p = ffi.new("wchar_t[]", u"abc\x00def") + assert ffi.rawstring(p) == u"abc\x00def\x00" + assert ffi.rawstring(p[1:6]) == u"bc\x00de" + # + raises(TypeError, ffi.rawstring, "foobar") + raises(TypeError, ffi.rawstring, p + 1) From pypy.commits at gmail.com Fri Apr 15 13:35:40 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Apr 2016 10:35:40 -0700 (PDT) Subject: [pypy-commit] cffi default: Python3 compat Message-ID: 
<5711266c.811d1c0a.3ac80.ffff8c36@mx.google.com> Author: Armin Rigo Branch: Changeset: r2661:6e5dfaf706ba Date: 2016-04-15 19:36 +0200 http://bitbucket.org/cffi/cffi/changeset/6e5dfaf706ba/ Log: Python3 compat diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3529,9 +3529,9 @@ def test_rawstring(): BChar = new_primitive_type("char") BArray = new_array_type(new_pointer_type(BChar), 10) # char[10] - p = newp(BArray, "abc\x00def") - assert rawstring(p) == "abc\x00def\x00\x00\x00" - assert rawstring(p[1:6]) == "bc\x00de" + p = newp(BArray, b"abc\x00def") + assert rawstring(p) == b"abc\x00def\x00\x00\x00" + assert rawstring(p[1:6]) == b"bc\x00de" BWChar = new_primitive_type("wchar_t") BArray = new_array_type(new_pointer_type(BWChar), 10) # wchar_t[10] p = newp(BArray, u"abc\x00def") @@ -3540,7 +3540,7 @@ BChar = new_primitive_type("uint8_t") BArray = new_array_type(new_pointer_type(BChar), 10) # uint8_t[10] p = newp(BArray, [65 + i for i in range(10)]) - assert rawstring(p) == "ABCDEFGHIJ" + assert rawstring(p) == b"ABCDEFGHIJ" # py.test.raises(TypeError, rawstring, "foobar") py.test.raises(TypeError, rawstring, p + 1) diff --git a/testing/cffi0/test_ffi_backend.py b/testing/cffi0/test_ffi_backend.py --- a/testing/cffi0/test_ffi_backend.py +++ b/testing/cffi0/test_ffi_backend.py @@ -475,9 +475,9 @@ def test_rawstring(self): ffi = FFI() - p = ffi.new("char[]", "abc\x00def") - assert ffi.rawstring(p) == "abc\x00def\x00" - assert ffi.rawstring(p[1:6]) == "bc\x00de" + p = ffi.new("char[]", b"abc\x00def") + assert ffi.rawstring(p) == b"abc\x00def\x00" + assert ffi.rawstring(p[1:6]) == b"bc\x00de" p = ffi.new("wchar_t[]", u"abc\x00def") assert ffi.rawstring(p) == u"abc\x00def\x00" assert ffi.rawstring(p[1:6]) == u"bc\x00de" diff --git a/testing/cffi1/test_ffi_obj.py b/testing/cffi1/test_ffi_obj.py --- a/testing/cffi1/test_ffi_obj.py +++ b/testing/cffi1/test_ffi_obj.py @@ -498,9 +498,9 @@ def test_rawstring(): ffi = _cffi1_backend.FFI() - p = 
ffi.new("char[]", "abc\x00def") - assert ffi.rawstring(p) == "abc\x00def\x00" - assert ffi.rawstring(p[1:6]) == "bc\x00de" + p = ffi.new("char[]", b"abc\x00def") + assert ffi.rawstring(p) == b"abc\x00def\x00" + assert ffi.rawstring(p[1:6]) == b"bc\x00de" p = ffi.new("wchar_t[]", u"abc\x00def") assert ffi.rawstring(p) == u"abc\x00def\x00" assert ffi.rawstring(p[1:6]) == u"bc\x00de" From pypy.commits at gmail.com Fri Apr 15 17:07:45 2016 From: pypy.commits at gmail.com (pjenvey) Date: Fri, 15 Apr 2016 14:07:45 -0700 (PDT) Subject: [pypy-commit] pypy default: clarify UINT handling Message-ID: <57115821.d5da1c0a.d3c4.ffff96f7@mx.google.com> Author: Philip Jenvey Branch: Changeset: r83692:2a98879d4193 Date: 2016-04-15 13:46 -0700 http://bitbucket.org/pypy/pypy/changeset/2a98879d4193/ Log: clarify UINT handling diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -556,18 +556,18 @@ self.w_class = None self.method = method - if self.canoverflow: - assert self.bytes <= rffi.sizeof(rffi.ULONG) - if self.bytes == rffi.sizeof(rffi.ULONG) and not signed and \ - self.unwrap == 'int_w': - # Treat this type as a ULONG - self.unwrap = 'bigint_w' - self.canoverflow = False - def _freeze_(self): # hint for the annotator: track individual constant instances return True +if rffi.sizeof(rffi.UINT) == rffi.sizeof(rffi.ULONG): + # 32 bits: UINT can't safely overflow into a C long (rpython int) + # via int_w, handle it like ULONG below + _UINTTypeCode = \ + TypeCode(rffi.UINT, 'bigint_w') +else: + _UINTTypeCode = \ + TypeCode(rffi.UINT, 'int_w', True) types = { 'c': TypeCode(lltype.Char, 'str_w', method=''), 'u': TypeCode(lltype.UniChar, 'unicode_w', method=''), @@ -576,7 +576,7 @@ 'h': TypeCode(rffi.SHORT, 'int_w', True, True), 'H': TypeCode(rffi.USHORT, 'int_w', True), 'i': TypeCode(rffi.INT, 'int_w', True, True), - 'I': TypeCode(rffi.UINT, 'int_w', True), + 'I': 
_UINTTypeCode, 'l': TypeCode(rffi.LONG, 'int_w', True, True), 'L': TypeCode(rffi.ULONG, 'bigint_w'), # Overflow handled by # rbigint.touint() which From pypy.commits at gmail.com Fri Apr 15 17:07:47 2016 From: pypy.commits at gmail.com (pjenvey) Date: Fri, 15 Apr 2016 14:07:47 -0700 (PDT) Subject: [pypy-commit] pypy py3k: clarify UINT handling Message-ID: <57115823.06d8c20a.38efb.7950@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83693:34627b5368d9 Date: 2016-04-15 13:46 -0700 http://bitbucket.org/pypy/pypy/changeset/34627b5368d9/ Log: clarify UINT handling (grafted from 2a98879d41935b018e38646f5a2c48f5d4736e77) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -593,13 +593,6 @@ self.w_class = None self.method = method - if self.canoverflow: - if self.bytes == rffi.sizeof(rffi.ULONG) and not signed and \ - self.unwrap == 'int_w': - # Treat this type as a ULONG - self.unwrap = 'bigint_w' - self.canoverflow = False - def _freeze_(self): # hint for the annotator: track individual constant instances return True @@ -608,6 +601,14 @@ return self.unwrap == 'int_w' or self.unwrap == 'bigint_w' +if rffi.sizeof(rffi.UINT) == rffi.sizeof(rffi.ULONG): + # 32 bits: UINT can't safely overflow into a C long (rpython int) + # via int_w, handle it like ULONG below + _UINTTypeCode = \ + TypeCode(rffi.UINT, 'bigint_w') +else: + _UINTTypeCode = \ + TypeCode(rffi.UINT, 'int_w', True) types = { 'u': TypeCode(lltype.UniChar, 'unicode_w', method=''), 'b': TypeCode(rffi.SIGNEDCHAR, 'int_w', True, True), @@ -615,7 +616,7 @@ 'h': TypeCode(rffi.SHORT, 'int_w', True, True), 'H': TypeCode(rffi.USHORT, 'int_w', True), 'i': TypeCode(rffi.INT, 'int_w', True, True), - 'I': TypeCode(rffi.UINT, 'int_w', True), + 'I': _UINTTypeCode, 'l': TypeCode(rffi.LONG, 'int_w', True, True), 'L': TypeCode(rffi.ULONG, 'bigint_w.touint'), 'q': TypeCode(rffi.LONGLONG, 
'bigint_w.tolonglong', True, True), From pypy.commits at gmail.com Fri Apr 15 17:07:48 2016 From: pypy.commits at gmail.com (pjenvey) Date: Fri, 15 Apr 2016 14:07:48 -0700 (PDT) Subject: [pypy-commit] pypy py3k: fix for py3k's new convert scheme Message-ID: <57115824.81f0c20a.b9052.ffffc4f7@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83694:fc05d7cc3776 Date: 2016-04-15 14:06 -0700 http://bitbucket.org/pypy/pypy/changeset/fc05d7cc3776/ Log: fix for py3k's new convert scheme diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -605,7 +605,7 @@ # 32 bits: UINT can't safely overflow into a C long (rpython int) # via int_w, handle it like ULONG below _UINTTypeCode = \ - TypeCode(rffi.UINT, 'bigint_w') + TypeCode(rffi.UINT, 'bigint_w.touint') else: _UINTTypeCode = \ TypeCode(rffi.UINT, 'int_w', True) From pypy.commits at gmail.com Fri Apr 15 19:11:38 2016 From: pypy.commits at gmail.com (pjenvey) Date: Fri, 15 Apr 2016 16:11:38 -0700 (PDT) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <5711752a.c50a1c0a.1a9c6.ffffef44@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83695:3bc87fc47479 Date: 2016-04-15 16:10 -0700 http://bitbucket.org/pypy/pypy/changeset/3bc87fc47479/ Log: merge default diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -48,6 +48,7 @@ 'from_buffer': 'func.from_buffer', 'string': 'func.string', + 'rawstring': 'func.rawstring', 'buffer': 'cbuffer.buffer', 'memmove': 'func.memmove', diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -7,11 +7,12 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef 
-from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import ovfcheck from pypy.module._cffi_backend import cdataobj from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray +from pypy.module._cffi_backend import ctypeprim class W_CTypeArray(W_CTypePtrOrArray): @@ -108,6 +109,21 @@ def typeoffsetof_index(self, index): return self.ctptr.typeoffsetof_index(index) + def rawstring(self, w_cdata): + if isinstance(self.ctitem, ctypeprim.W_CTypePrimitive): + space = self.space + length = w_cdata.get_array_length() + if self.ctitem.size == rffi.sizeof(lltype.Char): + with w_cdata as ptr: + s = rffi.charpsize2str(ptr, length) + return space.wrapbytes(s) + elif self.is_unichar_ptr_or_array(): + with w_cdata as ptr: + cdata = rffi.cast(rffi.CWCHARP, ptr) + u = rffi.wcharpsize2unicode(cdata, length) + return space.wrap(u) + return W_CTypePtrOrArray.rawstring(self, w_cdata) + class W_CDataIter(W_Root): _immutable_fields_ = ['ctitem', 'cdata', '_stop'] # but not '_next' diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -127,6 +127,12 @@ raise oefmt(space.w_TypeError, "string(): unexpected cdata '%s' argument", self.name) + def rawstring(self, cdataobj): + space = self.space + raise oefmt(space.w_TypeError, + "expected a 'char[]' or 'uint8_t[]' or 'int8_t[]' " + "or 'wchar_t[]', got '%s'", self.name) + def add(self, cdata, i): space = self.space raise oefmt(space.w_TypeError, "cannot add a cdata '%s' and a number", diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -542,6 +542,21 @@ return w_cdata.ctype.string(w_cdata, maxlen) + @unwrap_spec(w_cdata=W_CData) + def descr_rawstring(self, w_cdata): + """\ +Convert a cdata that is an array of 
'char' or 'wchar_t' to +a byte or unicode string. Unlike ffi.string(), it does not stop +at the first null. + +Note that if you have a pointer and an explicit length, you +can use 'p[0:length]' to make an array view. This is similar to +the construct 'list(p[0:length])', which returns a list of chars/ +unichars/ints/floats.""" + # + return w_cdata.ctype.rawstring(w_cdata) + + def descr_sizeof(self, w_arg): """\ Return the size in bytes of the argument. @@ -736,6 +751,7 @@ new_allocator = interp2app(W_FFIObject.descr_new_allocator), new_handle = interp2app(W_FFIObject.descr_new_handle), offsetof = interp2app(W_FFIObject.descr_offsetof), + rawstring = interp2app(W_FFIObject.descr_rawstring), sizeof = interp2app(W_FFIObject.descr_sizeof), string = interp2app(W_FFIObject.descr_string), typeof = interp2app(W_FFIObject.descr_typeof), diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -78,6 +78,12 @@ # ____________________________________________________________ + at unwrap_spec(w_cdata=cdataobj.W_CData) +def rawstring(space, w_cdata): + return w_cdata.ctype.rawstring(w_cdata) + +# ____________________________________________________________ + def _get_types(space): return space.newtuple([space.gettypefor(cdataobj.W_CData), space.gettypefor(ctypeobj.W_CType)]) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3514,3 +3514,22 @@ d = {} _get_common_types(d) assert d['bool'] == '_Bool' + +def test_rawstring(): + BChar = new_primitive_type("char") + BArray = new_array_type(new_pointer_type(BChar), 10) # char[10] + p = newp(BArray, "abc\x00def") + assert rawstring(p) == "abc\x00def\x00\x00\x00" + assert rawstring(p[1:6]) == "bc\x00de" + BWChar = new_primitive_type("wchar_t") + BArray = 
new_array_type(new_pointer_type(BWChar), 10) # wchar_t[10] + p = newp(BArray, u"abc\x00def") + assert rawstring(p) == u"abc\x00def\x00\x00\x00" + assert rawstring(p[1:6]) == u"bc\x00de" + BChar = new_primitive_type("uint8_t") + BArray = new_array_type(new_pointer_type(BChar), 10) # uint8_t[10] + p = newp(BArray, [65 + i for i in range(10)]) + assert rawstring(p) == "ABCDEFGHIJ" + # + py.test.raises(TypeError, rawstring, "foobar") + py.test.raises(TypeError, rawstring, p + 1) diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -476,3 +476,16 @@ for i in range(5): raises(ValueError, ffi.init_once, do_init, "tag") assert seen == [1] * (i + 1) + + def test_rawstring(self): + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + p = ffi.new("char[]", "abc\x00def") + assert ffi.rawstring(p) == "abc\x00def\x00" + assert ffi.rawstring(p[1:6]) == "bc\x00de" + p = ffi.new("wchar_t[]", u"abc\x00def") + assert ffi.rawstring(p) == u"abc\x00def\x00" + assert ffi.rawstring(p[1:6]) == u"bc\x00de" + # + raises(TypeError, ffi.rawstring, "foobar") + raises(TypeError, ffi.rawstring, p + 1) From pypy.commits at gmail.com Fri Apr 15 21:47:15 2016 From: pypy.commits at gmail.com (pjenvey) Date: Fri, 15 Apr 2016 18:47:15 -0700 (PDT) Subject: [pypy-commit] pypy py3k: reapply test_hash skips from ce0cdf69b07a Message-ID: <571199a3.4967c20a.53666.ffffc768@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83696:93e3beb469e2 Date: 2016-04-15 16:33 -0700 http://bitbucket.org/pypy/pypy/changeset/93e3beb469e2/ Log: reapply test_hash skips from ce0cdf69b07a diff --git a/lib-python/3/test/test_hash.py b/lib-python/3/test/test_hash.py --- a/lib-python/3/test/test_hash.py +++ b/lib-python/3/test/test_hash.py @@ -8,6 +8,7 @@ import sys import unittest from test.script_helper import assert_python_ok +from 
test.support import impl_detail, check_impl_detail from collections import Hashable IS_64BIT = sys.maxsize > 2**32 @@ -140,6 +141,7 @@ def get_hash_command(self, repr_): return 'print(hash(eval(%a)))' % repr_ + @impl_detail("PyPy does not support hash randomization", pypy=False) def get_hash(self, repr_, seed=None): env = os.environ.copy() env['__cleanenv'] = True # signal to assert_python not to do a copy @@ -161,6 +163,11 @@ self.assertNotEqual(run1, run2) class StringlikeHashRandomizationTests(HashRandomizationTests): + if check_impl_detail(pypy=True): + EMPTY_STRING_HASH = -1 + else: + EMPTY_STRING_HASH = 0 + def test_null_hash(self): # PYTHONHASHSEED=0 disables the randomized hash if IS_64BIT: @@ -194,21 +201,21 @@ repr_ = repr('abc') def test_empty_string(self): - self.assertEqual(hash(""), 0) + self.assertEqual(hash(""), self.EMPTY_STRING_HASH) class BytesHashRandomizationTests(StringlikeHashRandomizationTests, unittest.TestCase): repr_ = repr(b'abc') def test_empty_string(self): - self.assertEqual(hash(b""), 0) + self.assertEqual(hash(b""), self.EMPTY_STRING_HASH) class MemoryviewHashRandomizationTests(StringlikeHashRandomizationTests, unittest.TestCase): repr_ = "memoryview(b'abc')" def test_empty_string(self): - self.assertEqual(hash(memoryview(b"")), 0) + self.assertEqual(hash(memoryview(b"")), self.EMPTY_STRING_HASH) class DatetimeTests(HashRandomizationTests): def get_hash_command(self, repr_): From pypy.commits at gmail.com Fri Apr 15 21:55:05 2016 From: pypy.commits at gmail.com (kunalgrover05) Date: Fri, 15 Apr 2016 18:55:05 -0700 (PDT) Subject: [pypy-commit] pypy py3.3-hashfix: Raise NotImplemented when value not an integer or float in specialisedtuple Message-ID: <57119b79.891d1c0a.b1dfb.0679@mx.google.com> Author: Kunal Grover Branch: py3.3-hashfix Changeset: r83698:da3b47de3e90 Date: 2016-02-19 01:23 +0530 http://bitbucket.org/pypy/pypy/changeset/da3b47de3e90/ Log: Raise NotImplemented when value not an integer or float in specialisedtuple diff 
--git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -520,21 +520,7 @@ return _new_int(space, w_inttype, w_x, w_base) def descr_hash(self, space): - a = self.intval - sign = 1 - if a < 0: - sign = -1 - a = -a - - x = r_uint(a) - # efficient x % HASH_MODULUS: as HASH_MODULUS is a Mersenne - # prime - x = (x & HASH_MODULUS) + (x >> HASH_BITS) - if x >= HASH_MODULUS: - x -= HASH_MODULUS - - x = intmask(intmask(x) * sign) - return wrapint(space, -2 if x == -1 else x) + return space.wrap(_hash_int(space, self.intval)) def as_w_long(self, space): # XXX: should try smalllong diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -1,7 +1,7 @@ from pypy.interpreter.error import OperationError from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.util import negate -from rpython.rlib.objectmodel import compute_hash, specialize +from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import intmask from rpython.rlib.unroll import unrolling_iterable from rpython.tool.sourcetools import func_with_new_name @@ -73,7 +73,8 @@ from pypy.objspace.std.intobject import _hash_int y = _hash_int(space, value) else: - y = compute_hash(value) + raise NotImplementedError + x = (x ^ y) * mult z -= 1 mult += 82520 + z + z From pypy.commits at gmail.com Fri Apr 15 21:55:08 2016 From: pypy.commits at gmail.com (kunalgrover05) Date: Fri, 15 Apr 2016 18:55:08 -0700 (PDT) Subject: [pypy-commit] pypy py3.3-hashfix: Minor fix Message-ID: <57119b7c.e21bc20a.6fe4c.ffffc903@mx.google.com> Author: Kunal Grover Branch: py3.3-hashfix Changeset: r83700:4b8aa2cad808 Date: 2016-02-19 04:00 +0530 http://bitbucket.org/pypy/pypy/changeset/4b8aa2cad808/ Log: Minor fix diff --git a/pypy/objspace/std/intobject.py 
b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -520,7 +520,7 @@ return _new_int(space, w_inttype, w_x, w_base) def descr_hash(self, space): - return space.wrap(_hash_int(space, self.intval)) + return space.wrap(_hash_int(self.intval)) def as_w_long(self, space): # XXX: should try smalllong @@ -1016,7 +1016,7 @@ ) -def _hash_int(space, a): +def _hash_int(a): sign = 1 if a < 0: sign = -1 diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -71,7 +71,7 @@ # hash for int which is different from the hash # given by rpython from pypy.objspace.std.intobject import _hash_int - y = _hash_int(space, value) + y = _hash_int(value) else: raise NotImplementedError From pypy.commits at gmail.com Fri Apr 15 21:55:03 2016 From: pypy.commits at gmail.com (kunalgrover05) Date: Fri, 15 Apr 2016 18:55:03 -0700 (PDT) Subject: [pypy-commit] pypy py3.3-hashfix: Use intobject hash function for specialisedtuple Message-ID: <57119b77.d3301c0a.79ffa.0bdb@mx.google.com> Author: Kunal Grover Branch: py3.3-hashfix Changeset: r83697:7aa21c0ec926 Date: 2016-02-19 00:54 +0530 http://bitbucket.org/pypy/pypy/changeset/7aa21c0ec926/ Log: Use intobject hash function for specialisedtuple diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -1028,3 +1028,20 @@ __pow__ = interpindirect2app(W_AbstractIntObject.descr_pow), __rpow__ = interpindirect2app(W_AbstractIntObject.descr_rpow), ) + + +def _hash_int(space, a): + sign = 1 + if a < 0: + sign = -1 + a = -a + + x = r_uint(a) + # efficient x % HASH_MODULUS: as HASH_MODULUS is a Mersenne + # prime + x = (x & HASH_MODULUS) + (x >> HASH_BITS) + if x >= HASH_MODULUS: + x -= HASH_MODULUS + + x = intmask(intmask(x) * sign) + return -2 if x == -1 else 
x diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -67,6 +67,11 @@ # integer & other less frequent cases from pypy.objspace.std.floatobject import _hash_float y = _hash_float(space, value) + elif typetuple[i] == int: + # hash for int which is different from the hash + # given by rpython + from pypy.objspace.std.intobject import _hash_int + y = _hash_int(space, value) else: y = compute_hash(value) x = (x ^ y) * mult From pypy.commits at gmail.com Fri Apr 15 21:55:12 2016 From: pypy.commits at gmail.com (pjenvey) Date: Fri, 15 Apr 2016 18:55:12 -0700 (PDT) Subject: [pypy-commit] pypy py3.3-hashfix: close branch Message-ID: <57119b80.e21bc20a.6fe4c.ffffc907@mx.google.com> Author: Philip Jenvey Branch: py3.3-hashfix Changeset: r83702:8520044ca6a8 Date: 2016-04-15 18:53 -0700 http://bitbucket.org/pypy/pypy/changeset/8520044ca6a8/ Log: close branch From pypy.commits at gmail.com Fri Apr 15 21:55:07 2016 From: pypy.commits at gmail.com (kunalgrover05) Date: Fri, 15 Apr 2016 18:55:07 -0700 (PDT) Subject: [pypy-commit] pypy py3.3-hashfix: Added test Message-ID: <57119b7b.47afc20a.10eb2.ffffc29a@mx.google.com> Author: Kunal Grover Branch: py3.3-hashfix Changeset: r83699:605e04e0e53d Date: 2016-02-19 03:50 +0530 http://bitbucket.org/pypy/pypy/changeset/605e04e0e53d/ Log: Added test diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -177,6 +177,11 @@ assert hash(a) == hash((1, 2)) == hash((1.0, 2.0)) == hash((1.0, 2)) + d = tuple([-1, 1]) + e = (-1, 1) + assert d == e + assert hash(d) == hash(e) + def test_getitem(self): t = (5, 3) assert (t)[0] == 5 From pypy.commits at gmail.com Fri Apr 15 21:55:10 2016 From: pypy.commits at 
gmail.com (pjenvey) Date: Fri, 15 Apr 2016 18:55:10 -0700 (PDT) Subject: [pypy-commit] pypy py3k: merge kunalgrover05/pypy/py3.3-hashfix (pull request #402) Message-ID: <57119b7e.82bb1c0a.47bc.0620@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83701:32e47f04869f Date: 2016-04-15 18:52 -0700 http://bitbucket.org/pypy/pypy/changeset/32e47f04869f/ Log: merge kunalgrover05/pypy/py3.3-hashfix (pull request #402) use intobject hash function for specialisedtuple diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -520,21 +520,7 @@ return _new_int(space, w_inttype, w_x, w_base) def descr_hash(self, space): - a = self.intval - sign = 1 - if a < 0: - sign = -1 - a = -a - - x = r_uint(a) - # efficient x % HASH_MODULUS: as HASH_MODULUS is a Mersenne - # prime - x = (x & HASH_MODULUS) + (x >> HASH_BITS) - if x >= HASH_MODULUS: - x -= HASH_MODULUS - - x = intmask(intmask(x) * sign) - return wrapint(space, -2 if x == -1 else x) + return space.wrap(_hash_int(self.intval)) def as_w_long(self, space): # XXX: should try smalllong @@ -1028,3 +1014,20 @@ __pow__ = interpindirect2app(W_AbstractIntObject.descr_pow), __rpow__ = interpindirect2app(W_AbstractIntObject.descr_rpow), ) + + +def _hash_int(a): + sign = 1 + if a < 0: + sign = -1 + a = -a + + x = r_uint(a) + # efficient x % HASH_MODULUS: as HASH_MODULUS is a Mersenne + # prime + x = (x & HASH_MODULUS) + (x >> HASH_BITS) + if x >= HASH_MODULUS: + x -= HASH_MODULUS + + x = intmask(intmask(x) * sign) + return -2 if x == -1 else x diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -1,7 +1,7 @@ from pypy.interpreter.error import OperationError from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.util import negate -from 
rpython.rlib.objectmodel import compute_hash, specialize +from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import intmask from rpython.rlib.unroll import unrolling_iterable from rpython.tool.sourcetools import func_with_new_name @@ -67,8 +67,14 @@ # integer & other less frequent cases from pypy.objspace.std.floatobject import _hash_float y = _hash_float(space, value) + elif typetuple[i] == int: + # hash for int which is different from the hash + # given by rpython + from pypy.objspace.std.intobject import _hash_int + y = _hash_int(value) else: - y = compute_hash(value) + raise NotImplementedError + x = (x ^ y) * mult z -= 1 mult += 82520 + z + z diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -177,6 +177,11 @@ assert hash(a) == hash((1, 2)) == hash((1.0, 2.0)) == hash((1.0, 2)) + d = tuple([-1, 1]) + e = (-1, 1) + assert d == e + assert hash(d) == hash(e) + def test_getitem(self): t = (5, 3) assert (t)[0] == 5 From pypy.commits at gmail.com Sat Apr 16 11:54:19 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Apr 2016 08:54:19 -0700 (PDT) Subject: [pypy-commit] pypy default: fix for test_file Message-ID: <5712602b.03dd1c0a.3dcc0.037a@mx.google.com> Author: Armin Rigo Branch: Changeset: r83703:c967ca439384 Date: 2016-04-16 17:54 +0200 http://bitbucket.org/pypy/pypy/changeset/c967ca439384/ Log: fix for test_file diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3518,9 +3518,9 @@ def test_rawstring(): BChar = new_primitive_type("char") BArray = new_array_type(new_pointer_type(BChar), 10) # char[10] - p = newp(BArray, "abc\x00def") - assert rawstring(p) == 
"abc\x00def\x00\x00\x00" - assert rawstring(p[1:6]) == "bc\x00de" + p = newp(BArray, b"abc\x00def") + assert rawstring(p) == b"abc\x00def\x00\x00\x00" + assert rawstring(p[1:6]) == b"bc\x00de" BWChar = new_primitive_type("wchar_t") BArray = new_array_type(new_pointer_type(BWChar), 10) # wchar_t[10] p = newp(BArray, u"abc\x00def") @@ -3529,7 +3529,7 @@ BChar = new_primitive_type("uint8_t") BArray = new_array_type(new_pointer_type(BChar), 10) # uint8_t[10] p = newp(BArray, [65 + i for i in range(10)]) - assert rawstring(p) == "ABCDEFGHIJ" + assert rawstring(p) == b"ABCDEFGHIJ" # py.test.raises(TypeError, rawstring, "foobar") py.test.raises(TypeError, rawstring, p + 1) From pypy.commits at gmail.com Sat Apr 16 12:00:41 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Apr 2016 09:00:41 -0700 (PDT) Subject: [pypy-commit] pypy default: fix the detection code (probably) for ARM Message-ID: <571261a9.839a1c0a.67c51.11a4@mx.google.com> Author: Armin Rigo Branch: Changeset: r83704:67b9e6084b96 Date: 2016-04-16 18:00 +0200 http://bitbucket.org/pypy/pypy/changeset/67b9e6084b96/ Log: fix the detection code (probably) for ARM diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4510,7 +4510,10 @@ continue w = t[2].split() if len(w) == 0: - continue + if '' in line: + w = ['UNDEFINED'] + else: + continue words.append(w[0] + ';') print '[[%s]]' % (w[0],) text = ' '.join(words) From pypy.commits at gmail.com Sat Apr 16 17:28:35 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Apr 2016 14:28:35 -0700 (PDT) Subject: [pypy-commit] cffi default: Remove again ffi.rawstring(), and implement instead ffi.unpack(). 
Message-ID: <5712ae83.c9b0c20a.15d05.5cf4@mx.google.com> Author: Armin Rigo Branch: Changeset: r2662:69aa2d8cb308 Date: 2016-04-16 23:28 +0200 http://bitbucket.org/cffi/cffi/changeset/69aa2d8cb308/ Log: Remove again ffi.rawstring(), and implement instead ffi.unpack(). Pre-documentation notes: (hi Amaury :-) * ffi.unpack(, n) == ffi.buffer(, n)[:] but I hope it is a little bit more natural * ffi.unpack(, n): this is the original motivation, because it has no previous equivalent * ffi.unpack(, n) == list([0:n]) but should be much faster on CPython diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5582,37 +5582,118 @@ return NULL; } -static PyObject *b_rawstring(PyObject *self, PyObject *arg) +static PyObject *b_unpack(PyObject *self, PyObject *args, PyObject *kwds) { CDataObject *cd; CTypeDescrObject *ctitem; - Py_ssize_t length; - - if (!CData_Check(arg)) { - PyErr_SetString(PyExc_TypeError, "expected a 'cdata' object"); + Py_ssize_t i, length, itemsize, best_alignment; + PyObject *result; + char *src; + int casenum; + static char *keywords[] = {"cdata", "length", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!n:unpack", keywords, + &CData_Type, &cd, &length)) return NULL; - } - cd = (CDataObject *)arg; + ctitem = cd->c_type->ct_itemdescr; - if ((cd->c_type->ct_flags & CT_ARRAY) && - (ctitem->ct_flags & (CT_PRIMITIVE_CHAR | - CT_PRIMITIVE_SIGNED | - CT_PRIMITIVE_UNSIGNED))) { - length = get_array_length(cd); + if (!(cd->c_type->ct_flags & (CT_ARRAY|CT_POINTER)) || + !(ctitem->ct_flags & CT_PRIMITIVE_ANY)) { + PyErr_Format(PyExc_TypeError, + "expected a pointer to a primitive type, got '%s'", + cd->c_type->ct_name); + return NULL; + } + if (length < 0) { + PyErr_SetString(PyExc_ValueError, "'length' cannot be negative"); + return NULL; + } + if (cd->c_data == NULL) { + PyObject *s = cdata_repr(cd); + if (s != NULL) { + PyErr_Format(PyExc_RuntimeError, + "cannot use unpack() on %s", + PyText_AS_UTF8(s)); 
+ Py_DECREF(s); + } + return NULL; + } + + /* byte- and unicode strings */ + if (ctitem->ct_flags & CT_PRIMITIVE_CHAR) { if (ctitem->ct_size == sizeof(char)) return PyBytes_FromStringAndSize(cd->c_data, length); #ifdef HAVE_WCHAR_H - else if (ctitem->ct_flags & CT_PRIMITIVE_CHAR) { - assert(ctitem->ct_size == sizeof(wchar_t)); + else if (ctitem->ct_size == sizeof(wchar_t)) return _my_PyUnicode_FromWideChar((wchar_t *)cd->c_data, length); +#endif + } + + /* else, the result is a list. This implementation should be + equivalent to, but on CPython much faster than, 'list(p[0:length])'. + */ + result = PyList_New(length); if (result == NULL) return NULL; + + src = cd->c_data; + itemsize = ctitem->ct_size; + best_alignment = ctitem->ct_length; + + casenum = -1; + if ((best_alignment & (best_alignment - 1)) == 0 && + (((uintptr_t)src) & (best_alignment - 1)) == 0) { + /* Source data is fully aligned; we can directly read without + memcpy(). The unaligned case is expected to be rare; in + this situation it is ok to fall back to the general + convert_to_object() in the loop. For now we also use this + fall-back for types that are too large. + */ + if (ctitem->ct_flags & CT_PRIMITIVE_SIGNED) { + if (itemsize == sizeof(long)) casenum = 3; + else if (itemsize == sizeof(int)) casenum = 2; + else if (itemsize == sizeof(short)) casenum = 1; + else if (itemsize == sizeof(signed char)) casenum = 0; } -#endif - } - PyErr_Format(PyExc_TypeError, - "expected a 'char[]' or 'uint8_t[]' or 'int8_t[]' " - "or 'wchar_t[]', got '%s'", - cd->c_type->ct_name); - return NULL; + else if (ctitem->ct_flags & CT_PRIMITIVE_UNSIGNED) { + /* Note: we never pick case 6 if sizeof(int) == sizeof(long), + so that case 6 below can assume that the 'unsigned int' result + would always fit in a 'signed long'. 
*/ + if (itemsize == sizeof(unsigned long)) casenum = 7; + else if (itemsize == sizeof(unsigned int)) casenum = 6; + else if (itemsize == sizeof(unsigned short)) casenum = 5; + else if (itemsize == sizeof(unsigned char)) casenum = 4; + } + else if (ctitem->ct_flags & CT_PRIMITIVE_FLOAT) { + if (itemsize == sizeof(double)) casenum = 9; + else if (itemsize == sizeof(float)) casenum = 8; + } + } + + for (i = 0; i < length; i++) { + PyObject *x; + switch (casenum) { + /* general case */ + default: x = convert_to_object(src, ctitem); break; + + /* special cases for performance only */ + case 0: x = PyInt_FromLong(*(signed char *)src); break; + case 1: x = PyInt_FromLong(*(short *)src); break; + case 2: x = PyInt_FromLong(*(int *)src); break; + case 3: x = PyInt_FromLong(*(long *)src); break; + case 4: x = PyInt_FromLong(*(unsigned char *)src); break; + case 5: x = PyInt_FromLong(*(unsigned short *)src); break; + case 6: x = PyInt_FromLong((long)*(unsigned int *)src); break; + case 7: x = PyLong_FromUnsignedLong(*(unsigned long *)src); break; + case 8: x = PyFloat_FromDouble(*(float *)src); break; + case 9: x = PyFloat_FromDouble(*(double *)src); break; + } + if (x == NULL) { + Py_DECREF(result); + return NULL; + } + PyList_SET_ITEM(result, i, x); + src += itemsize; + } + return result; } static PyObject *b_buffer(PyObject *self, PyObject *args, PyObject *kwds) @@ -6258,7 +6339,7 @@ {"rawaddressof", b_rawaddressof, METH_VARARGS}, {"getcname", b_getcname, METH_VARARGS}, {"string", (PyCFunction)b_string, METH_VARARGS | METH_KEYWORDS}, - {"rawstring", b_rawstring, METH_O}, + {"unpack", (PyCFunction)b_unpack, METH_VARARGS | METH_KEYWORDS}, {"buffer", (PyCFunction)b_buffer, METH_VARARGS | METH_KEYWORDS}, {"get_errno", b_get_errno, METH_NOARGS}, {"set_errno", b_set_errno, METH_O}, diff --git a/c/ffi_obj.c b/c/ffi_obj.c --- a/c/ffi_obj.c +++ b/c/ffi_obj.c @@ -459,18 +459,21 @@ #define ffi_string b_string /* ffi_string() => b_string() from _cffi_backend.c */ 
-PyDoc_STRVAR(ffi_rawstring_doc, -"Convert a cdata that is an array of 'char' or 'wchar_t' to\n" -"a byte or unicode string. Unlike ffi.string(), it does not stop\n" -"at the first null.\n" +PyDoc_STRVAR(ffi_unpack_doc, +"Unpack an array of primitive C data of the given length,\n" +"returning a Python string/unicode/list.\n" "\n" -"Note that if you have a pointer and an explicit length, you\n" -"can use 'p[0:length]' to make an array view. This is similar to\n" -"the construct 'list(p[0:length])', which returns a list of chars/\n" -"unichars/ints/floats."); +"If 'cdata' is a pointer to 'char', returns a byte string.\n" +"Unlike ffi.string(), it does not stop at the first null.\n" +"\n" +"If 'cdata' is a pointer to 'wchar_t', returns a unicode string.\n" +"'length' is measured in wchar_t's; it is not the size in bytes.\n" +"\n" +"If 'cdata' is a pointer to some other integer or floating-point\n" +"type, returns a list of 'length' integers or floats."); -#define ffi_rawstring b_rawstring /* ffi_rawstring() => b_rawstring() - from _cffi_backend.c */ +#define ffi_unpack b_unpack /* ffi_unpack() => b_unpack() + from _cffi_backend.c */ PyDoc_STRVAR(ffi_buffer_doc, "Return a read-write buffer object that references the raw C data\n" @@ -1103,10 +1106,10 @@ {"new_allocator",(PyCFunction)ffi_new_allocator,METH_VKW,ffi_new_allocator_doc}, {"new_handle", (PyCFunction)ffi_new_handle, METH_O, ffi_new_handle_doc}, {"offsetof", (PyCFunction)ffi_offsetof, METH_VARARGS, ffi_offsetof_doc}, - {"rawstring", (PyCFunction)ffi_rawstring, METH_O, ffi_rawstring_doc}, {"sizeof", (PyCFunction)ffi_sizeof, METH_O, ffi_sizeof_doc}, {"string", (PyCFunction)ffi_string, METH_VKW, ffi_string_doc}, {"typeof", (PyCFunction)ffi_typeof, METH_O, ffi_typeof_doc}, + {"unpack", (PyCFunction)ffi_unpack, METH_VKW, ffi_unpack_doc}, {NULL} }; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3526,21 +3526,48 @@ _get_common_types(d) assert d['bool'] == '_Bool' -def test_rawstring(): 
+def test_unpack(): BChar = new_primitive_type("char") BArray = new_array_type(new_pointer_type(BChar), 10) # char[10] p = newp(BArray, b"abc\x00def") - assert rawstring(p) == b"abc\x00def\x00\x00\x00" - assert rawstring(p[1:6]) == b"bc\x00de" + p0 = p + assert unpack(p, 10) == b"abc\x00def\x00\x00\x00" + assert unpack(p+1, 5) == b"bc\x00de" BWChar = new_primitive_type("wchar_t") BArray = new_array_type(new_pointer_type(BWChar), 10) # wchar_t[10] p = newp(BArray, u"abc\x00def") - assert rawstring(p) == u"abc\x00def\x00\x00\x00" - assert rawstring(p[1:6]) == u"bc\x00de" - BChar = new_primitive_type("uint8_t") - BArray = new_array_type(new_pointer_type(BChar), 10) # uint8_t[10] - p = newp(BArray, [65 + i for i in range(10)]) - assert rawstring(p) == b"ABCDEFGHIJ" + assert unpack(p, 10) == u"abc\x00def\x00\x00\x00" + + for typename, samples in [ + ("uint8_t", [0, 2**8-1]), + ("uint16_t", [0, 2**16-1]), + ("uint32_t", [0, 2**32-1]), + ("uint64_t", [0, 2**64-1]), + ("int8_t", [-2**7, 2**7-1]), + ("int16_t", [-2**15, 2**15-1]), + ("int32_t", [-2**31, 2**31-1]), + ("int64_t", [-2**63, 2**63-1]), + ("_Bool", [0, 1]), + ("float", [0.0, 10.5]), + ("double", [12.34, 56.78]), + ]: + BItem = new_primitive_type(typename) + BArray = new_array_type(new_pointer_type(BItem), 10) + p = newp(BArray, samples) + result = unpack(p, len(samples)) + assert result == samples + for i in range(len(samples)): + assert result[i] == p[i] and type(result[i]) is type(p[i]) # - py.test.raises(TypeError, rawstring, "foobar") - py.test.raises(TypeError, rawstring, p + 1) + BInt = new_primitive_type("int") + py.test.raises(TypeError, unpack, p) + py.test.raises(TypeError, unpack, b"foobar", 6) + py.test.raises(TypeError, unpack, cast(BInt, 42), 1) + BFunc = new_function_type((BInt, BInt), BInt, False) + py.test.raises(TypeError, unpack, cast(new_pointer_type(BFunc), 42), 1) + # + py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 0) + py.test.raises(RuntimeError, unpack, 
cast(new_pointer_type(BChar), 0), 10) + # + py.test.raises(ValueError, unpack, p0, -1) + py.test.raises(ValueError, unpack, p, -1) diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -299,17 +299,20 @@ """ return self._backend.string(cdata, maxlen) - def rawstring(self, cdata): - """Convert a cdata that is an array of 'char' or 'wchar_t' to - a byte or unicode string. Unlike ffi.string(), it does not stop - at the first null. + def unpack(self, cdata, length): + """Unpack an array of primitive C data of the given length, + returning a Python string/unicode/list. - Note that if you have a pointer and an explicit length, you - can use 'p[0:length]' to make an array view. This is similar to - the construct 'list(p[0:length])', which returns a list of chars/ - unichars/ints/floats. + If 'cdata' is a pointer to 'char', returns a byte string. + Unlike ffi.string(), it does not stop at the first null. + + If 'cdata' is a pointer to 'wchar_t', returns a unicode string. + 'length' is measured in wchar_t's; it is not the size in bytes. + + If 'cdata' is a pointer to some other integer or floating-point + type, returns a list of 'length' integers or floats. 
""" - return self._backend.rawstring(cdata) + return self._backend.unpack(cdata, length) def buffer(self, cdata, size=-1): """Return a read-write buffer object that references the raw C data diff --git a/testing/cffi0/test_ffi_backend.py b/testing/cffi0/test_ffi_backend.py --- a/testing/cffi0/test_ffi_backend.py +++ b/testing/cffi0/test_ffi_backend.py @@ -473,11 +473,9 @@ ['a', 'cc', 'ccc'], ['aa', 'aaa', 'g']) - def test_rawstring(self): + def test_unpack(self): ffi = FFI() p = ffi.new("char[]", b"abc\x00def") - assert ffi.rawstring(p) == b"abc\x00def\x00" - assert ffi.rawstring(p[1:6]) == b"bc\x00de" - p = ffi.new("wchar_t[]", u"abc\x00def") - assert ffi.rawstring(p) == u"abc\x00def\x00" - assert ffi.rawstring(p[1:6]) == u"bc\x00de" + assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" + p = ffi.new("int[]", [-123456789]) + assert ffi.unpack(p, 1) == [-123456789] diff --git a/testing/cffi1/test_ffi_obj.py b/testing/cffi1/test_ffi_obj.py --- a/testing/cffi1/test_ffi_obj.py +++ b/testing/cffi1/test_ffi_obj.py @@ -496,14 +496,9 @@ time.sleep(0.51) assert seen == ['init!', 'oops'] * 3 -def test_rawstring(): +def test_unpack(): ffi = _cffi1_backend.FFI() p = ffi.new("char[]", b"abc\x00def") - assert ffi.rawstring(p) == b"abc\x00def\x00" - assert ffi.rawstring(p[1:6]) == b"bc\x00de" - p = ffi.new("wchar_t[]", u"abc\x00def") - assert ffi.rawstring(p) == u"abc\x00def\x00" - assert ffi.rawstring(p[1:6]) == u"bc\x00de" - # - py.test.raises(TypeError, ffi.rawstring, "foobar") - py.test.raises(TypeError, ffi.rawstring, p + 1) + assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" + p = ffi.new("int[]", [-123456789]) + assert ffi.unpack(p, 1) == [-123456789] From pypy.commits at gmail.com Sat Apr 16 18:17:53 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Apr 2016 15:17:53 -0700 (PDT) Subject: [pypy-commit] cffi default: There is no reason to restrict ffi.unpack() to primitives. 
Message-ID: <5712ba11.c4efc20a.1137f.1b04@mx.google.com> Author: Armin Rigo Branch: Changeset: r2663:f8852464e468 Date: 2016-04-17 00:18 +0200 http://bitbucket.org/cffi/cffi/changeset/f8852464e468/ Log: There is no reason to restrict ffi.unpack() to primitives. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5586,7 +5586,7 @@ { CDataObject *cd; CTypeDescrObject *ctitem; - Py_ssize_t i, length, itemsize, best_alignment; + Py_ssize_t i, length, itemsize; PyObject *result; char *src; int casenum; @@ -5596,11 +5596,9 @@ &CData_Type, &cd, &length)) return NULL; - ctitem = cd->c_type->ct_itemdescr; - if (!(cd->c_type->ct_flags & (CT_ARRAY|CT_POINTER)) || - !(ctitem->ct_flags & CT_PRIMITIVE_ANY)) { + if (!(cd->c_type->ct_flags & (CT_ARRAY|CT_POINTER))) { PyErr_Format(PyExc_TypeError, - "expected a pointer to a primitive type, got '%s'", + "expected a pointer or array, got '%s'", cd->c_type->ct_name); return NULL; } @@ -5620,6 +5618,7 @@ } /* byte- and unicode strings */ + ctitem = cd->c_type->ct_itemdescr; if (ctitem->ct_flags & CT_PRIMITIVE_CHAR) { if (ctitem->ct_size == sizeof(char)) return PyBytes_FromStringAndSize(cd->c_data, length); @@ -5630,17 +5629,34 @@ } /* else, the result is a list. This implementation should be - equivalent to, but on CPython much faster than, 'list(p[0:length])'. + equivalent to but much faster than '[p[i] for i in range(length)]'. + (Note that on PyPy, 'list(p[0:length])' should be equally fast, + but arguably, finding out that there *is* such an unexpected way + to write things down is the real problem.) 
*/ - result = PyList_New(length); if (result == NULL) return NULL; + result = PyList_New(length); + if (result == NULL) + return NULL; src = cd->c_data; itemsize = ctitem->ct_size; - best_alignment = ctitem->ct_length; + if (itemsize < 0) { + PyErr_Format(PyExc_ValueError, "'%s' points to items of unknown size", + cd->c_type->ct_name); + return NULL; + } + + /* Determine some common fast-paths for the loop below. The case -1 + is the fall-back, which always gives the right answer. */ + +#define ALIGNMENT_CHECK(align) \ + (((align) & ((align) - 1)) == 0 && \ + (((uintptr_t)src) & ((align) - 1)) == 0) casenum = -1; - if ((best_alignment & (best_alignment - 1)) == 0 && - (((uintptr_t)src) & (best_alignment - 1)) == 0) { + + if ((ctitem->ct_flags & CT_PRIMITIVE_ANY) && + ALIGNMENT_CHECK(ctitem->ct_length)) { /* Source data is fully aligned; we can directly read without memcpy(). The unaligned case is expected to be rare; in this situation it is ok to fall back to the general @@ -5667,6 +5683,10 @@ else if (itemsize == sizeof(float)) casenum = 8; } } + else if (ctitem->ct_flags & (CT_POINTER | CT_FUNCTIONPTR)) { + casenum = 10; /* any pointer */ + } +#undef ALIGNMENT_CHECK for (i = 0; i < length; i++) { PyObject *x; @@ -5685,6 +5705,7 @@ case 7: x = PyLong_FromUnsignedLong(*(unsigned long *)src); break; case 8: x = PyFloat_FromDouble(*(float *)src); break; case 9: x = PyFloat_FromDouble(*(double *)src); break; + case 10: x = new_simple_cdata(*(char **)src, ctitem); break; } if (x == NULL) { Py_DECREF(result); diff --git a/c/ffi_obj.c b/c/ffi_obj.c --- a/c/ffi_obj.c +++ b/c/ffi_obj.c @@ -460,17 +460,19 @@ from _cffi_backend.c */ PyDoc_STRVAR(ffi_unpack_doc, -"Unpack an array of primitive C data of the given length,\n" +"Unpack an array of C data of the given length,\n" "returning a Python string/unicode/list.\n" "\n" "If 'cdata' is a pointer to 'char', returns a byte string.\n" -"Unlike ffi.string(), it does not stop at the first null.\n" +"It does not stop at the first 
null. This is equivalent to:\n" +"ffi.buffer(cdata, length)[:]\n" "\n" "If 'cdata' is a pointer to 'wchar_t', returns a unicode string.\n" "'length' is measured in wchar_t's; it is not the size in bytes.\n" "\n" -"If 'cdata' is a pointer to some other integer or floating-point\n" -"type, returns a list of 'length' integers or floats."); +"If 'cdata' is a pointer to anything else, returns a list of\n" +"'length' items. This is a faster equivalent to:\n" +"[cdata[i] for i in range(length)]"); #define ffi_unpack b_unpack /* ffi_unpack() => b_unpack() from _cffi_backend.c */ diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3563,8 +3563,30 @@ py.test.raises(TypeError, unpack, p) py.test.raises(TypeError, unpack, b"foobar", 6) py.test.raises(TypeError, unpack, cast(BInt, 42), 1) + # + BPtr = new_pointer_type(BInt) + random_ptr = cast(BPtr, -424344) + other_ptr = cast(BPtr, 54321) + BArray = new_array_type(new_pointer_type(BPtr), None) + lst = unpack(newp(BArray, [random_ptr, other_ptr]), 2) + assert lst == [random_ptr, other_ptr] + # BFunc = new_function_type((BInt, BInt), BInt, False) - py.test.raises(TypeError, unpack, cast(new_pointer_type(BFunc), 42), 1) + BFuncPtr = new_pointer_type(BFunc) + lst = unpack(newp(new_array_type(BFuncPtr, None), 2), 2) + assert len(lst) == 2 + assert not lst[0] and not lst[1] + assert typeof(lst[0]) is BFunc + # + BStruct = new_struct_type("foo") + BStructPtr = new_pointer_type(BStruct) + e = py.test.raises(ValueError, unpack, cast(BStructPtr, 42), 5) + assert str(e.value) == "'foo *' points to items of unknown size" + complete_struct_or_union(BStruct, [('a1', BInt, -1), + ('a2', BInt, -1)]) + lst = unpack(newp(new_array_type(BStructPtr, None), [[4,5], [6,7]]), 2) + assert typeof(lst[0]) is BStruct + assert lst[0].a1 == 4 and lst[1].a2 == 7 # py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 0) py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 10) diff --git 
a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -300,17 +300,19 @@ return self._backend.string(cdata, maxlen) def unpack(self, cdata, length): - """Unpack an array of primitive C data of the given length, + """Unpack an array of C data of the given length, returning a Python string/unicode/list. If 'cdata' is a pointer to 'char', returns a byte string. - Unlike ffi.string(), it does not stop at the first null. + It does not stop at the first null. This is equivalent to: + ffi.buffer(cdata, length)[:] If 'cdata' is a pointer to 'wchar_t', returns a unicode string. 'length' is measured in wchar_t's; it is not the size in bytes. - If 'cdata' is a pointer to some other integer or floating-point - type, returns a list of 'length' integers or floats. + If 'cdata' is a pointer to anything else, returns a list of + 'length' items. This is a faster equivalent to: + [cdata[i] for i in range(length)] """ return self._backend.unpack(cdata, length) From pypy.commits at gmail.com Sat Apr 16 23:26:12 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 16 Apr 2016 20:26:12 -0700 (PDT) Subject: [pypy-commit] pypy default: Try to fix OS X translation Message-ID: <57130254.d3161c0a.59fa0.3c9c@mx.google.com> Author: Ronan Lamy Branch: Changeset: r83705:ce2cd933a2cf Date: 2016-04-17 04:25 +0100 http://bitbucket.org/pypy/pypy/changeset/ce2cd933a2cf/ Log: Try to fix OS X translation diff --git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py --- a/rpython/rlib/rposix_stat.py +++ b/rpython/rlib/rposix_stat.py @@ -36,10 +36,9 @@ # - ALL_STAT_FIELDS contains Float fields if the system can retrieve # sub-second timestamps. # - TIMESPEC is defined when the "struct stat" contains st_atim field. 
- -try: +if sys.platform.startswith('linux') or sys.platform.startswith('openbsd'): from rpython.rlib.rposix import TIMESPEC -except ImportError: +else: TIMESPEC = None From pypy.commits at gmail.com Sun Apr 17 03:53:54 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Apr 2016 00:53:54 -0700 (PDT) Subject: [pypy-commit] cffi default: Oops. In this case, unpack(p) returns a list of cdata objects that Message-ID: <57134112.aa5ec20a.1a60a.ffff9249@mx.google.com> Author: Armin Rigo Branch: Changeset: r2664:129bed02b1c3 Date: 2016-04-17 09:54 +0200 http://bitbucket.org/cffi/cffi/changeset/129bed02b1c3/ Log: Oops. In this case, unpack(p) returns a list of cdata objects that are each inside 'p'. So 'p' must be kept alive diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3584,7 +3584,8 @@ assert str(e.value) == "'foo *' points to items of unknown size" complete_struct_or_union(BStruct, [('a1', BInt, -1), ('a2', BInt, -1)]) - lst = unpack(newp(new_array_type(BStructPtr, None), [[4,5], [6,7]]), 2) + array_of_structs = newp(new_array_type(BStructPtr, None), [[4,5], [6,7]]) + lst = unpack(array_of_structs, 2) assert typeof(lst[0]) is BStruct assert lst[0].a1 == 4 and lst[1].a2 == 7 # From pypy.commits at gmail.com Sun Apr 17 04:24:33 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Apr 2016 01:24:33 -0700 (PDT) Subject: [pypy-commit] pypy default: Implement ffi.unpack(); the fast-paths are coming next Message-ID: <57134841.47afc20a.10eb2.ffffa97a@mx.google.com> Author: Armin Rigo Branch: Changeset: r83706:343cbe027c00 Date: 2016-04-17 09:55 +0200 http://bitbucket.org/pypy/pypy/changeset/343cbe027c00/ Log: Implement ffi.unpack(); the fast-paths are coming next diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -48,7 +48,7 @@ 'from_buffer': 'func.from_buffer', 'string': 'func.string', - 'rawstring': 'func.rawstring', + 'unpack':
'func.unpack', 'buffer': 'cbuffer.buffer', 'memmove': 'func.memmove', diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -367,6 +367,25 @@ with self as ptr: return W_CDataGCP(self.space, ptr, self.ctype, self, w_destructor) + def unpack(self, length): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + space = self.space + if not self.ctype.is_nonfunc_pointer_or_array: + raise oefmt(space.w_TypeError, + "expected a pointer or array, got '%s'", + self.ctype.name) + if length < 0: + raise oefmt(space.w_ValueError, "'length' cannot be negative") + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + with self as ptr: + if not ptr: + raise oefmt(space.w_RuntimeError, + "cannot use unpack() on %s", + space.str_w(self.repr())) + w_result = ctype.ctitem.unpack_ptr(ctype, ptr, length) + return w_result + class W_CDataMem(W_CData): """This is used only by the results of cffi.cast('int', x) diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -109,21 +109,6 @@ def typeoffsetof_index(self, index): return self.ctptr.typeoffsetof_index(index) - def rawstring(self, w_cdata): - if isinstance(self.ctitem, ctypeprim.W_CTypePrimitive): - space = self.space - length = w_cdata.get_array_length() - if self.ctitem.size == rffi.sizeof(lltype.Char): - with w_cdata as ptr: - s = rffi.charpsize2str(ptr, length) - return space.wrapbytes(s) - elif self.is_unichar_ptr_or_array(): - with w_cdata as ptr: - cdata = rffi.cast(rffi.CWCHARP, ptr) - u = rffi.wcharpsize2unicode(cdata, length) - return space.wrap(u) - return W_CTypePtrOrArray.rawstring(self, w_cdata) - class W_CDataIter(W_Root): _immutable_fields_ = ['ctitem', 'cdata', '_stop'] # but not '_next' diff --git 
a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -127,11 +127,20 @@ raise oefmt(space.w_TypeError, "string(): unexpected cdata '%s' argument", self.name) - def rawstring(self, cdataobj): + def unpack_ptr(self, w_ctypeptr, ptr, length): + # generic implementation, when the type of items is not known to + # be one for which a fast-case exists space = self.space - raise oefmt(space.w_TypeError, - "expected a 'char[]' or 'uint8_t[]' or 'int8_t[]' " - "or 'wchar_t[]', got '%s'", self.name) + itemsize = self.size + if itemsize < 0: + raise oefmt(space.w_ValueError, + "'%s' points to items of unknown size", + w_ctypeptr.name) + result_w = [None] * length + for i in range(length): + result_w[i] = self.convert_to_object(ptr) + ptr = rffi.ptradd(ptr, itemsize) + return space.newlist(result_w) def add(self, cdata, i): space = self.space diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -125,6 +125,10 @@ value = self._convert_to_char(w_ob) cdata[0] = value + def unpack_ptr(self, w_ctypeptr, ptr, length): + s = rffi.charpsize2str(ptr, length) + return self.space.wrapbytes(s) + # XXX explicitly use an integer type instead of lltype.UniChar here, # because for now the latter is defined as unsigned by RPython (even @@ -171,6 +175,10 @@ value = self._convert_to_unichar(w_ob) rffi.cast(rffi.CWCHARP, cdata)[0] = value + def unpack_ptr(self, w_ctypeptr, ptr, length): + u = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, ptr), length) + return self.space.wrap(u) + class W_CTypePrimitiveSigned(W_CTypePrimitive): _attrs_ = ['value_fits_long', 'value_smaller_than_long'] diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ 
b/pypy/module/_cffi_backend/ffi_obj.py @@ -542,19 +542,23 @@ return w_cdata.ctype.string(w_cdata, maxlen) - @unwrap_spec(w_cdata=W_CData) - def descr_rawstring(self, w_cdata): - """\ -Convert a cdata that is an array of 'char' or 'wchar_t' to -a byte or unicode string. Unlike ffi.string(), it does not stop -at the first null. + @unwrap_spec(w_cdata=W_CData, length=int) + def descr_unpack(self, w_cdata, length): + """Unpack an array of C data of the given length, +returning a Python string/unicode/list. -Note that if you have a pointer and an explicit length, you -can use 'p[0:length]' to make an array view. This is similar to -the construct 'list(p[0:length])', which returns a list of chars/ -unichars/ints/floats.""" +If 'cdata' is a pointer to 'char', returns a byte string. +It does not stop at the first null. This is equivalent to: +ffi.buffer(cdata, length)[:] + +If 'cdata' is a pointer to 'wchar_t', returns a unicode string. +'length' is measured in wchar_t's; it is not the size in bytes. + +If 'cdata' is a pointer to anything else, returns a list of +'length' items. 
This is a faster equivalent to: +[cdata[i] for i in range(length)]""" # - return w_cdata.ctype.rawstring(w_cdata) + return w_cdata.unpack(length) def descr_sizeof(self, w_arg): @@ -751,8 +755,8 @@ new_allocator = interp2app(W_FFIObject.descr_new_allocator), new_handle = interp2app(W_FFIObject.descr_new_handle), offsetof = interp2app(W_FFIObject.descr_offsetof), - rawstring = interp2app(W_FFIObject.descr_rawstring), sizeof = interp2app(W_FFIObject.descr_sizeof), string = interp2app(W_FFIObject.descr_string), typeof = interp2app(W_FFIObject.descr_typeof), + unpack = interp2app(W_FFIObject.descr_unpack), **_extras) diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -78,9 +78,9 @@ # ____________________________________________________________ - at unwrap_spec(w_cdata=cdataobj.W_CData) -def rawstring(space, w_cdata): - return w_cdata.ctype.rawstring(w_cdata) + at unwrap_spec(w_cdata=cdataobj.W_CData, length=int) +def unpack(space, w_cdata, length): + return w_cdata.unpack(length) # ____________________________________________________________ diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3515,21 +3515,71 @@ _get_common_types(d) assert d['bool'] == '_Bool' -def test_rawstring(): +def test_unpack(): BChar = new_primitive_type("char") BArray = new_array_type(new_pointer_type(BChar), 10) # char[10] p = newp(BArray, b"abc\x00def") - assert rawstring(p) == b"abc\x00def\x00\x00\x00" - assert rawstring(p[1:6]) == b"bc\x00de" + p0 = p + assert unpack(p, 10) == b"abc\x00def\x00\x00\x00" + assert unpack(p+1, 5) == b"bc\x00de" BWChar = new_primitive_type("wchar_t") BArray = new_array_type(new_pointer_type(BWChar), 10) # wchar_t[10] p = newp(BArray, u"abc\x00def") - assert rawstring(p) == 
u"abc\x00def\x00\x00\x00" - assert rawstring(p[1:6]) == u"bc\x00de" - BChar = new_primitive_type("uint8_t") - BArray = new_array_type(new_pointer_type(BChar), 10) # uint8_t[10] - p = newp(BArray, [65 + i for i in range(10)]) - assert rawstring(p) == b"ABCDEFGHIJ" + assert unpack(p, 10) == u"abc\x00def\x00\x00\x00" + + for typename, samples in [ + ("uint8_t", [0, 2**8-1]), + ("uint16_t", [0, 2**16-1]), + ("uint32_t", [0, 2**32-1]), + ("uint64_t", [0, 2**64-1]), + ("int8_t", [-2**7, 2**7-1]), + ("int16_t", [-2**15, 2**15-1]), + ("int32_t", [-2**31, 2**31-1]), + ("int64_t", [-2**63, 2**63-1]), + ("_Bool", [0, 1]), + ("float", [0.0, 10.5]), + ("double", [12.34, 56.78]), + ]: + BItem = new_primitive_type(typename) + BArray = new_array_type(new_pointer_type(BItem), 10) + p = newp(BArray, samples) + result = unpack(p, len(samples)) + assert result == samples + for i in range(len(samples)): + assert result[i] == p[i] and type(result[i]) is type(p[i]) # - py.test.raises(TypeError, rawstring, "foobar") - py.test.raises(TypeError, rawstring, p + 1) + BInt = new_primitive_type("int") + py.test.raises(TypeError, unpack, p) + py.test.raises(TypeError, unpack, b"foobar", 6) + py.test.raises(TypeError, unpack, cast(BInt, 42), 1) + # + BPtr = new_pointer_type(BInt) + random_ptr = cast(BPtr, -424344) + other_ptr = cast(BPtr, 54321) + BArray = new_array_type(new_pointer_type(BPtr), None) + lst = unpack(newp(BArray, [random_ptr, other_ptr]), 2) + assert lst == [random_ptr, other_ptr] + # + BFunc = new_function_type((BInt, BInt), BInt, False) + BFuncPtr = new_pointer_type(BFunc) + lst = unpack(newp(new_array_type(BFuncPtr, None), 2), 2) + assert len(lst) == 2 + assert not lst[0] and not lst[1] + assert typeof(lst[0]) is BFunc + # + BStruct = new_struct_type("foo") + BStructPtr = new_pointer_type(BStruct) + e = py.test.raises(ValueError, unpack, cast(BStructPtr, 42), 5) + assert str(e.value) == "'foo *' points to items of unknown size" + complete_struct_or_union(BStruct, [('a1', BInt, 
-1), + ('a2', BInt, -1)]) + array_of_structs = newp(new_array_type(BStructPtr, None), [[4,5], [6,7]]) + lst = unpack(array_of_structs, 2) + assert typeof(lst[0]) is BStruct + assert lst[0].a1 == 4 and lst[1].a2 == 7 + # + py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 0) + py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 10) + # + py.test.raises(ValueError, unpack, p0, -1) + py.test.raises(ValueError, unpack, p, -1) diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -477,15 +477,10 @@ raises(ValueError, ffi.init_once, do_init, "tag") assert seen == [1] * (i + 1) - def test_rawstring(self): + def test_unpack(self): import _cffi_backend as _cffi1_backend ffi = _cffi1_backend.FFI() - p = ffi.new("char[]", "abc\x00def") - assert ffi.rawstring(p) == "abc\x00def\x00" - assert ffi.rawstring(p[1:6]) == "bc\x00de" - p = ffi.new("wchar_t[]", u"abc\x00def") - assert ffi.rawstring(p) == u"abc\x00def\x00" - assert ffi.rawstring(p[1:6]) == u"bc\x00de" - # - raises(TypeError, ffi.rawstring, "foobar") - raises(TypeError, ffi.rawstring, p + 1) + p = ffi.new("char[]", b"abc\x00def") + assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" + p = ffi.new("int[]", [-123456789]) + assert ffi.unpack(p, 1) == [-123456789] From pypy.commits at gmail.com Sun Apr 17 04:24:37 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Apr 2016 01:24:37 -0700 (PDT) Subject: [pypy-commit] pypy default: merge heads Message-ID: <57134845.a272c20a.1d26c.ffff81d8@mx.google.com> Author: Armin Rigo Branch: Changeset: r83708:d18adeb45be8 Date: 2016-04-17 10:24 +0200 http://bitbucket.org/pypy/pypy/changeset/d18adeb45be8/ Log: merge heads diff --git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py --- a/rpython/rlib/rposix_stat.py +++ b/rpython/rlib/rposix_stat.py @@ -36,10 +36,9 @@ # - 
ALL_STAT_FIELDS contains Float fields if the system can retrieve # sub-second timestamps. # - TIMESPEC is defined when the "struct stat" contains st_atim field. - -try: +if sys.platform.startswith('linux') or sys.platform.startswith('openbsd'): from rpython.rlib.rposix import TIMESPEC -except ImportError: +else: TIMESPEC = None From pypy.commits at gmail.com Sun Apr 17 04:24:35 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Apr 2016 01:24:35 -0700 (PDT) Subject: [pypy-commit] pypy default: Fast paths for unpack() Message-ID: <57134843.c9b0c20a.15d05.ffffe289@mx.google.com> Author: Armin Rigo Branch: Changeset: r83707:191f30d6a23d Date: 2016-04-17 10:22 +0200 http://bitbucket.org/pypy/pypy/changeset/191f30d6a23d/ Log: Fast paths for unpack() diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1029,6 +1029,9 @@ def newlist_int(self, list_i): return self.newlist([self.wrap(i) for i in list_i]) + def newlist_float(self, list_f): + return self.newlist([self.wrap(f) for f in list_f]) + def newlist_hint(self, sizehint): from pypy.objspace.std.listobject import make_empty_list_with_size return make_empty_list_with_size(self, sizehint) diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -323,14 +323,18 @@ from pypy.module._cffi_backend import ctypearray ctype = self.ctype if isinstance(ctype, ctypearray.W_CTypeArray): - return ctype.ctitem.unpack_list_of_int_items(self) + length = self.get_array_length() + with self as ptr: + return ctype.ctitem.unpack_list_of_int_items(ptr, length) return None def unpackiterable_float(self, space): from pypy.module._cffi_backend import ctypearray ctype = self.ctype if isinstance(ctype, ctypearray.W_CTypeArray): - return ctype.ctitem.unpack_list_of_float_items(self) + length = 
self.get_array_length() + with self as ptr: + return ctype.ctitem.unpack_list_of_float_items(ptr, length) return None @specialize.argtype(1) diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -49,10 +49,10 @@ def is_unichar_ptr_or_array(self): return False - def unpack_list_of_int_items(self, cdata): + def unpack_list_of_int_items(self, ptr, length): return None - def unpack_list_of_float_items(self, cdata): + def unpack_list_of_float_items(self, ptr, length): return None def pack_list_of_items(self, cdata, w_ob): diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -87,6 +87,13 @@ return self.space.wrap(s) return W_CType.string(self, cdataobj, maxlen) + def unpack_ptr(self, w_ctypeptr, ptr, length): + result = self.unpack_list_of_int_items(ptr, length) + if result is not None: + return self.space.newlist_int(result) + return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length) + + class W_CTypePrimitiveCharOrUniChar(W_CTypePrimitive): _attrs_ = [] is_primitive_integer = True @@ -229,19 +236,16 @@ def write_raw_integer_data(self, w_cdata, value): w_cdata.write_raw_signed_data(value) - def unpack_list_of_int_items(self, w_cdata): + def unpack_list_of_int_items(self, ptr, length): if self.size == rffi.sizeof(rffi.LONG): from rpython.rlib.rrawarray import populate_list_from_raw_array res = [] - length = w_cdata.get_array_length() - with w_cdata as ptr: - buf = rffi.cast(rffi.LONGP, ptr) - populate_list_from_raw_array(res, buf, length) + buf = rffi.cast(rffi.LONGP, ptr) + populate_list_from_raw_array(res, buf, length) return res elif self.value_smaller_than_long: - res = [0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_list_from_raw_array(res, ptr, self.size) + res = [0] * length + 
misc.unpack_list_from_raw_array(res, ptr, self.size) return res return None @@ -321,11 +325,10 @@ def write_raw_integer_data(self, w_cdata, value): w_cdata.write_raw_unsigned_data(value) - def unpack_list_of_int_items(self, w_cdata): + def unpack_list_of_int_items(self, ptr, length): if self.value_fits_long: - res = [0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_unsigned_list_from_raw_array(res, ptr, self.size) + res = [0] * length + misc.unpack_unsigned_list_from_raw_array(res, ptr, self.size) return res return None @@ -399,19 +402,16 @@ value = space.float_w(space.float(w_ob)) misc.write_raw_float_data(cdata, value, self.size) - def unpack_list_of_float_items(self, w_cdata): + def unpack_list_of_float_items(self, ptr, length): if self.size == rffi.sizeof(rffi.DOUBLE): from rpython.rlib.rrawarray import populate_list_from_raw_array res = [] - length = w_cdata.get_array_length() - with w_cdata as ptr: - buf = rffi.cast(rffi.DOUBLEP, ptr) - populate_list_from_raw_array(res, buf, length) + buf = rffi.cast(rffi.DOUBLEP, ptr) + populate_list_from_raw_array(res, buf, length) return res elif self.size == rffi.sizeof(rffi.FLOAT): - res = [0.0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_cfloat_list_from_raw_array(res, ptr) + res = [0.0] * length + misc.unpack_cfloat_list_from_raw_array(res, ptr) return res return None @@ -429,6 +429,12 @@ return True return W_CTypePrimitive.pack_list_of_items(self, cdata, w_ob) + def unpack_ptr(self, w_ctypeptr, ptr, length): + result = self.unpack_list_of_float_items(ptr, length) + if result is not None: + return self.space.newlist_float(result) + return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length) + class W_CTypePrimitiveLongDouble(W_CTypePrimitiveFloat): _attrs_ = [] diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -206,6 +206,12 @@ storage = strategy.erase(list_i) return 
W_ListObject.from_storage_and_strategy(space, storage, strategy) + @staticmethod + def newlist_float(space, list_f): + strategy = space.fromcache(FloatListStrategy) + storage = strategy.erase(list_f) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) + def __repr__(self): """ representation for debugging purposes """ return "%s(%s, %s)" % (self.__class__.__name__, self.strategy, diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -294,6 +294,9 @@ def newlist_int(self, list_i): return W_ListObject.newlist_int(self, list_i) + def newlist_float(self, list_f): + return W_ListObject.newlist_float(self, list_f) + def newdict(self, module=False, instance=False, kwargs=False, strdict=False): return W_DictMultiObject.allocate_and_init_instance( From pypy.commits at gmail.com Sun Apr 17 10:06:35 2016 From: pypy.commits at gmail.com (Sergey Matyunin) Date: Sun, 17 Apr 2016 07:06:35 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast: Implemented W_Broadcast for numpy.broadcast Message-ID: <5713986b.972e1c0a.de35b.6bf9@mx.google.com> Author: Sergey Matyunin Branch: numpy_broadcast Changeset: r83709:bbb4848772d2 Date: 2016-04-08 10:34 +0200 http://bitbucket.org/pypy/pypy/changeset/bbb4848772d2/ Log: Implemented W_Broadcast for numpy.broadcast diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -32,6 +32,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', 'nditer': 'nditer.W_NDIter', + 'broadcast': 'broadcast.W_Broadcast', 'set_docstring': 'support.descr_set_docstring', 'VisibleDeprecationWarning': 'support.W_VisibleDeprecationWarning', diff --git a/pypy/module/micronumpy/broadcast.py b/pypy/module/micronumpy/broadcast.py new file mode 100644 --- /dev/null +++ 
b/pypy/module/micronumpy/broadcast.py @@ -0,0 +1,100 @@ +import operator + +import pypy.module.micronumpy.constants as NPY +from nditer import ConcreteIter, parse_op_flag, parse_op_arg +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, W_NumpyObject +from rpython.rlib import jit +from strides import calculate_broadcast_strides, shape_agreement_multiple + + +class W_Broadcast(W_NumpyObject): + """ + Implementation of numpy.broadcast. + This class is a simplified version of nditer.W_NDIter with fixed iteration for broadcasted arrays. + """ + + @staticmethod + def descr_new_broadcast(space, w_subtype, __args__): + return W_Broadcast(space, __args__.arguments_w) + + def __init__(self, space, w_args): + self.seq = [convert_to_array(space, w_elem) + for w_elem in w_args] + + self.op_flags = parse_op_arg(space, 'op_flags', space.w_None, + len(self.seq), parse_op_flag) + + self.shape = tuple(shape_agreement_multiple(space, self.seq, shape=None)) + self.order = NPY.CORDER + + self.iters = [] + self.index = 0 + self.size = reduce(operator.mul, self.shape, 1) + for i in range(len(self.seq)): + it = self.get_iter(space, i) + it.contiguous = False + self.iters.append((it, it.reset())) + + self.done = False + pass + + def get_iter(self, space, i): + arr = self.seq[i] + imp = arr.implementation + if arr.is_scalar(): + return ConcreteIter(imp, 1, [], [], [], self.op_flags[i], self) + shape = self.shape + + backward = imp.order != self.order + + r = calculate_broadcast_strides(imp.strides, imp.backstrides, imp.shape, + shape, backward) + + iter_shape = shape + if len(shape) != len(r[0]): + # shape can be shorter when using an external loop, just return a view + iter_shape = imp.shape + return ConcreteIter(imp, imp.get_size(), iter_shape, r[0], r[1], + self.op_flags[i], self) + + def descr_iter(self, 
space): + return space.wrap(self) + + def descr_get_shape(self, space): + return space.wrap(self.shape) + + def descr_get_size(self, space): + return space.wrap(self.size) + + def descr_get_index(self, space): + return space.wrap(self.index) + + @jit.unroll_safe + def descr_next(self, space): + if self.index >= self.size: + self.done = True + raise OperationError(space.w_StopIteration, space.w_None) + self.index += 1 + res = [] + for i, (it, st) in enumerate(self.iters): + res.append(self._get_item(it, st)) + self.iters[i] = (it, it.next(st)) + if len(res) < 2: + return res[0] + return space.newtuple(res) + + def _get_item(self, it, st): + return W_NDimArray(it.getoperand(st)) + + +W_Broadcast.typedef = TypeDef("numpy.broadcast", + __new__=interp2app(W_Broadcast.descr_new_broadcast), + __iter__=interp2app(W_Broadcast.descr_iter), + next=interp2app(W_Broadcast.descr_next), + shape=GetSetProperty(W_Broadcast.descr_get_shape), + size=GetSetProperty(W_Broadcast.descr_get_size), + index=GetSetProperty(W_Broadcast.descr_get_index), + ) diff --git a/pypy/module/micronumpy/test/test_broadcast.py b/pypy/module/micronumpy/test/test_broadcast.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_broadcast.py @@ -0,0 +1,75 @@ +# -*- encoding: utf-8 -*- + +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestArrayBroadcast(BaseNumpyAppTest): + def test_broadcast_for_row_and_column(self): + import numpy as np + x = np.array([[1], [2], [3]]) + y = np.array([4, 5]) + b = list(np.broadcast(x, y)) + assert b == [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)] + + def test_broadcast_properties(self): + import numpy as np + x = np.array([[1], [2], [3]]) + y = np.array([4, 5]) + b = np.broadcast(x, y) + + assert b.shape == (3, 2) + assert b.size == 6 + assert b.index == 0 + + b.next() + b.next() + + assert b.shape == (3, 2) + assert b.size == 6 + assert b.index == 2 + + def test_broadcast_from_doctest(self): + """ + Test from 
numpy.broadcast doctest. + """ + import numpy as np + x = np.array([[1], [2], [3]]) + y = np.array([4, 5, 6]) + reference = np.array([[5., 6., 7.], + [6., 7., 8.], + [7., 8., 9.]]) + + b = np.broadcast(x, y) + out = np.empty(b.shape) + out.flat = [u + v for (u, v) in b] + + assert (reference == out).all() + assert out.dtype == reference.dtype + assert b.shape == reference.shape + + def test_broadcast_linear(self): + import numpy as np + x = np.array([1, 2, 3]) + y = np.array([4, 5, 6]) + b = list(np.broadcast(x, y)) + assert b == [(1, 4), (2, 5), (3, 6)] + assert b[0][0].dtype == x.dtype + + def test_broadcast_linear_unequal(self): + import numpy as np + x = np.array([1, 2, 3]) + y = np.array([4, 5]) + raises(ValueError, np.broadcast, x, y) + + def test_broadcast_3_args(self): + import numpy as np + x = np.array([[[1]], [[2]], [[3]]]) + y = np.array([[[40], [50]]]) + z = np.array([[[700, 800]]]) + + b = list(np.broadcast(x, y, z)) + + assert b == [(1, 40, 700), (1, 40, 800), (1, 50, 700), (1, 50, 800), + (2, 40, 700), (2, 40, 800), (2, 50, 700), (2, 50, 800), + (3, 40, 700), (3, 40, 800), (3, 50, 700), (3, 50, 800)] + From pypy.commits at gmail.com Sun Apr 17 10:06:39 2016 From: pypy.commits at gmail.com (Sergey Matyunin) Date: Sun, 17 Apr 2016 07:06:39 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast: In W_Broadcast (micronumpy) added check for number of arguments, added numiter property Message-ID: <5713986f.26b0c20a.f195b.119e@mx.google.com> Author: Sergey Matyunin Branch: numpy_broadcast Changeset: r83711:7f688202899a Date: 2016-04-08 13:07 +0200 http://bitbucket.org/pypy/pypy/changeset/7f688202899a/ Log: In W_Broadcast (micronumpy) added check for number of arguments, added numiter property diff --git a/pypy/module/micronumpy/broadcast.py b/pypy/module/micronumpy/broadcast.py --- a/pypy/module/micronumpy/broadcast.py +++ b/pypy/module/micronumpy/broadcast.py @@ -19,9 +19,14 @@ def descr_new_broadcast(space, w_subtype, __args__): return 
W_Broadcast(space, __args__.arguments_w) - def __init__(self, space, w_args): + def __init__(self, space, args): + num_args = len(args) + if not (2 <= num_args <= NPY.MAXARGS): + raise OperationError(space.w_ValueError, + space.wrap("Need at least two and fewer than (%d) array objects." % NPY.MAXARGS)) + self.seq = [convert_to_array(space, w_elem) - for w_elem in w_args] + for w_elem in args] self.op_flags = parse_op_arg(space, 'op_flags', space.w_None, len(self.seq), parse_op_flag) @@ -72,6 +77,9 @@ def descr_get_index(self, space): return space.wrap(self.index) + def descr_get_numiter(self, space): + return space.wrap(len(self.iters)) + @jit.unroll_safe def descr_next(self, space): if self.index >= self.size: @@ -97,4 +105,5 @@ shape=GetSetProperty(W_Broadcast.descr_get_shape), size=GetSetProperty(W_Broadcast.descr_get_size), index=GetSetProperty(W_Broadcast.descr_get_index), + numiter=GetSetProperty(W_Broadcast.descr_get_numiter), ) diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -77,6 +77,8 @@ WRAP = 1 RAISE = 2 +MAXARGS = 32 + # These can be requested in constructor functions and tested for ARRAY_C_CONTIGUOUS = 0x0001 ARRAY_F_CONTIGUOUS = 0x0002 diff --git a/pypy/module/micronumpy/test/test_broadcast.py b/pypy/module/micronumpy/test/test_broadcast.py --- a/pypy/module/micronumpy/test/test_broadcast.py +++ b/pypy/module/micronumpy/test/test_broadcast.py @@ -73,3 +73,16 @@ (2, 40, 700), (2, 40, 800), (2, 50, 700), (2, 50, 800), (3, 40, 700), (3, 40, 800), (3, 50, 700), (3, 50, 800)] + def test_number_of_arguments(self): + """ + Test from numpy unit tests. 
+ """ + import numpy as np + arr = np.empty((5,)) + for j in range(35): + arrs = [arr] * j + if j < 2 or j > 32: + raises(ValueError, np.broadcast, *arrs) + else: + mit = np.broadcast(*arrs) + assert mit.numiter == j From pypy.commits at gmail.com Sun Apr 17 10:06:41 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 17 Apr 2016 07:06:41 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast: test, implement overflow checking; small cleanups Message-ID: <57139871.c49a1c0a.75623.6976@mx.google.com> Author: mattip Branch: numpy_broadcast Changeset: r83712:16f4a95d75ee Date: 2016-04-17 16:33 +0300 http://bitbucket.org/pypy/pypy/changeset/16f4a95d75ee/ Log: test, implement overflow checking; small cleanups diff --git a/pypy/module/micronumpy/broadcast.py b/pypy/module/micronumpy/broadcast.py --- a/pypy/module/micronumpy/broadcast.py +++ b/pypy/module/micronumpy/broadcast.py @@ -1,6 +1,6 @@ import pypy.module.micronumpy.constants as NPY from nditer import ConcreteIter, parse_op_flag, parse_op_arg -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import support @@ -8,6 +8,8 @@ from rpython.rlib import jit from strides import calculate_broadcast_strides, shape_agreement_multiple +def descr_new_broadcast(space, w_subtype, __args__): + return W_Broadcast(space, __args__.arguments_w) class W_Broadcast(W_NumpyObject): """ @@ -15,15 +17,11 @@ This class is a simplified version of nditer.W_NDIter with fixed iteration for broadcasted arrays. """ - @staticmethod - def descr_new_broadcast(space, w_subtype, __args__): - return W_Broadcast(space, __args__.arguments_w) - def __init__(self, space, args): num_args = len(args) if not (2 <= num_args <= NPY.MAXARGS): - raise OperationError(space.w_ValueError, - space.wrap("Need at least two and fewer than (%d) array objects." 
% NPY.MAXARGS)) + raise oefmt(space.w_ValueError, + "Need at least two and fewer than (%d) array objects.", NPY.MAXARGS) self.seq = [convert_to_array(space, w_elem) for w_elem in args] @@ -37,7 +35,10 @@ self.iters = [] self.index = 0 - self.size = support.product(self.shape) + try: + self.size = support.product_check(self.shape) + except OverflowError as e: + raise oefmt(space.w_ValueError, "broadcast dimensions too large.") for i in range(len(self.seq)): it = self.get_iter(space, i) it.contiguous = False @@ -99,7 +100,7 @@ W_Broadcast.typedef = TypeDef("numpy.broadcast", - __new__=interp2app(W_Broadcast.descr_new_broadcast), + __new__=interp2app(descr_new_broadcast), __iter__=interp2app(W_Broadcast.descr_iter), next=interp2app(W_Broadcast.descr_next), shape=GetSetProperty(W_Broadcast.descr_get_shape), diff --git a/pypy/module/micronumpy/test/test_broadcast.py b/pypy/module/micronumpy/test/test_broadcast.py --- a/pypy/module/micronumpy/test/test_broadcast.py +++ b/pypy/module/micronumpy/test/test_broadcast.py @@ -55,11 +55,19 @@ assert b == [(1, 4), (2, 5), (3, 6)] assert b[0][0].dtype == x.dtype - def test_broadcast_linear_unequal(self): + def test_broadcast_failures(self): import numpy as np + import sys x = np.array([1, 2, 3]) y = np.array([4, 5]) raises(ValueError, np.broadcast, x, y) + a = np.empty(2**16,dtype='int8') + a = a.reshape(-1, 1, 1, 1) + b = a.reshape(1, -1, 1, 1) + c = a.reshape(1, 1, -1, 1) + d = a.reshape(1, 1, 1, -1) + exc = raises(ValueError, np.broadcast, a, b, c, d) + assert exc.value[0] == ('broadcast dimensions too large.') def test_broadcast_3_args(self): import numpy as np @@ -82,7 +90,8 @@ for j in range(35): arrs = [arr] * j if j < 2 or j > 32: - raises(ValueError, np.broadcast, *arrs) + exc = raises(ValueError, np.broadcast, *arrs) + assert exc.value[0] == ('Need at least two and fewer than (32) array objects.') else: mit = np.broadcast(*arrs) assert mit.numiter == j From pypy.commits at gmail.com Sun Apr 17 10:06:37 2016 From: 
pypy.commits at gmail.com (Sergey Matyunin) Date: Sun, 17 Apr 2016 07:06:37 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast: Fixed compiling of W_Broadcast Message-ID: <5713986d.a82cc20a.8a291.ffff8899@mx.google.com> Author: Sergey Matyunin Branch: numpy_broadcast Changeset: r83710:7680be59c5aa Date: 2016-04-08 11:38 +0200 http://bitbucket.org/pypy/pypy/changeset/7680be59c5aa/ Log: Fixed compiling of W_Broadcast -fixed tuple conversion -removed non-rpython reduce() call diff --git a/pypy/module/micronumpy/broadcast.py b/pypy/module/micronumpy/broadcast.py --- a/pypy/module/micronumpy/broadcast.py +++ b/pypy/module/micronumpy/broadcast.py @@ -1,10 +1,9 @@ -import operator - import pypy.module.micronumpy.constants as NPY from nditer import ConcreteIter, parse_op_flag, parse_op_arg from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.module.micronumpy import support from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, W_NumpyObject from rpython.rlib import jit from strides import calculate_broadcast_strides, shape_agreement_multiple @@ -27,12 +26,13 @@ self.op_flags = parse_op_arg(space, 'op_flags', space.w_None, len(self.seq), parse_op_flag) - self.shape = tuple(shape_agreement_multiple(space, self.seq, shape=None)) + self.shape = shape_agreement_multiple(space, self.seq, shape=None) self.order = NPY.CORDER self.iters = [] self.index = 0 - self.size = reduce(operator.mul, self.shape, 1) + + self.size = support.product(self.shape) for i in range(len(self.seq)): it = self.get_iter(space, i) it.contiguous = False @@ -64,7 +64,7 @@ return space.wrap(self) def descr_get_shape(self, space): - return space.wrap(self.shape) + return space.newtuple([space.wrap(i) for i in self.shape]) def descr_get_size(self, space): return space.wrap(self.size) From pypy.commits at gmail.com Sun Apr 17 10:06:47 2016 From: pypy.commits at gmail.com 
(mattip) Date: Sun, 17 Apr 2016 07:06:47 -0700 (PDT) Subject: [pypy-commit] pypy default: merge branch which provides numpy.broadcast Message-ID: <57139877.d3301c0a.79ffa.662a@mx.google.com> Author: mattip Branch: Changeset: r83715:bbef742722ea Date: 2016-04-17 16:37 +0300 http://bitbucket.org/pypy/pypy/changeset/bbef742722ea/ Log: merge branch which provides numpy.broadcast diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -32,6 +32,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', 'nditer': 'nditer.W_NDIter', + 'broadcast': 'broadcast.W_Broadcast', 'set_docstring': 'support.descr_set_docstring', 'VisibleDeprecationWarning': 'support.W_VisibleDeprecationWarning', diff --git a/pypy/module/micronumpy/broadcast.py b/pypy/module/micronumpy/broadcast.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/broadcast.py @@ -0,0 +1,110 @@ +import pypy.module.micronumpy.constants as NPY +from nditer import ConcreteIter, parse_op_flag, parse_op_arg +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.module.micronumpy import support +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, W_NumpyObject +from rpython.rlib import jit +from strides import calculate_broadcast_strides, shape_agreement_multiple + +def descr_new_broadcast(space, w_subtype, __args__): + return W_Broadcast(space, __args__.arguments_w) + +class W_Broadcast(W_NumpyObject): + """ + Implementation of numpy.broadcast. + This class is a simplified version of nditer.W_NDIter with fixed iteration for broadcasted arrays. 
+ """ + + def __init__(self, space, args): + num_args = len(args) + if not (2 <= num_args <= NPY.MAXARGS): + raise oefmt(space.w_ValueError, + "Need at least two and fewer than (%d) array objects.", NPY.MAXARGS) + + self.seq = [convert_to_array(space, w_elem) + for w_elem in args] + + self.op_flags = parse_op_arg(space, 'op_flags', space.w_None, + len(self.seq), parse_op_flag) + + self.shape = shape_agreement_multiple(space, self.seq, shape=None) + self.order = NPY.CORDER + + self.iters = [] + self.index = 0 + + try: + self.size = support.product_check(self.shape) + except OverflowError as e: + raise oefmt(space.w_ValueError, "broadcast dimensions too large.") + for i in range(len(self.seq)): + it = self.get_iter(space, i) + it.contiguous = False + self.iters.append((it, it.reset())) + + self.done = False + pass + + def get_iter(self, space, i): + arr = self.seq[i] + imp = arr.implementation + if arr.is_scalar(): + return ConcreteIter(imp, 1, [], [], [], self.op_flags[i], self) + shape = self.shape + + backward = imp.order != self.order + + r = calculate_broadcast_strides(imp.strides, imp.backstrides, imp.shape, + shape, backward) + + iter_shape = shape + if len(shape) != len(r[0]): + # shape can be shorter when using an external loop, just return a view + iter_shape = imp.shape + return ConcreteIter(imp, imp.get_size(), iter_shape, r[0], r[1], + self.op_flags[i], self) + + def descr_iter(self, space): + return space.wrap(self) + + def descr_get_shape(self, space): + return space.newtuple([space.wrap(i) for i in self.shape]) + + def descr_get_size(self, space): + return space.wrap(self.size) + + def descr_get_index(self, space): + return space.wrap(self.index) + + def descr_get_numiter(self, space): + return space.wrap(len(self.iters)) + + @jit.unroll_safe + def descr_next(self, space): + if self.index >= self.size: + self.done = True + raise OperationError(space.w_StopIteration, space.w_None) + self.index += 1 + res = [] + for i, (it, st) in enumerate(self.iters): 
+ res.append(self._get_item(it, st)) + self.iters[i] = (it, it.next(st)) + if len(res) < 2: + return res[0] + return space.newtuple(res) + + def _get_item(self, it, st): + return W_NDimArray(it.getoperand(st)) + + +W_Broadcast.typedef = TypeDef("numpy.broadcast", + __new__=interp2app(descr_new_broadcast), + __iter__=interp2app(W_Broadcast.descr_iter), + next=interp2app(W_Broadcast.descr_next), + shape=GetSetProperty(W_Broadcast.descr_get_shape), + size=GetSetProperty(W_Broadcast.descr_get_size), + index=GetSetProperty(W_Broadcast.descr_get_index), + numiter=GetSetProperty(W_Broadcast.descr_get_numiter), + ) diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -77,6 +77,8 @@ WRAP = 1 RAISE = 2 +MAXARGS = 32 + # These can be requested in constructor functions and tested for ARRAY_C_CONTIGUOUS = 0x0001 ARRAY_F_CONTIGUOUS = 0x0002 diff --git a/pypy/module/micronumpy/test/test_broadcast.py b/pypy/module/micronumpy/test/test_broadcast.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_broadcast.py @@ -0,0 +1,97 @@ +# -*- encoding: utf-8 -*- + +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestArrayBroadcast(BaseNumpyAppTest): + def test_broadcast_for_row_and_column(self): + import numpy as np + x = np.array([[1], [2], [3]]) + y = np.array([4, 5]) + b = list(np.broadcast(x, y)) + assert b == [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)] + + def test_broadcast_properties(self): + import numpy as np + x = np.array([[1], [2], [3]]) + y = np.array([4, 5]) + b = np.broadcast(x, y) + + assert b.shape == (3, 2) + assert b.size == 6 + assert b.index == 0 + + b.next() + b.next() + + assert b.shape == (3, 2) + assert b.size == 6 + assert b.index == 2 + + def test_broadcast_from_doctest(self): + """ + Test from numpy.broadcast doctest. 
+ """ + import numpy as np + x = np.array([[1], [2], [3]]) + y = np.array([4, 5, 6]) + reference = np.array([[5., 6., 7.], + [6., 7., 8.], + [7., 8., 9.]]) + + b = np.broadcast(x, y) + out = np.empty(b.shape) + out.flat = [u + v for (u, v) in b] + + assert (reference == out).all() + assert out.dtype == reference.dtype + assert b.shape == reference.shape + + def test_broadcast_linear(self): + import numpy as np + x = np.array([1, 2, 3]) + y = np.array([4, 5, 6]) + b = list(np.broadcast(x, y)) + assert b == [(1, 4), (2, 5), (3, 6)] + assert b[0][0].dtype == x.dtype + + def test_broadcast_failures(self): + import numpy as np + import sys + x = np.array([1, 2, 3]) + y = np.array([4, 5]) + raises(ValueError, np.broadcast, x, y) + a = np.empty(2**16,dtype='int8') + a = a.reshape(-1, 1, 1, 1) + b = a.reshape(1, -1, 1, 1) + c = a.reshape(1, 1, -1, 1) + d = a.reshape(1, 1, 1, -1) + exc = raises(ValueError, np.broadcast, a, b, c, d) + assert exc.value[0] == ('broadcast dimensions too large.') + + def test_broadcast_3_args(self): + import numpy as np + x = np.array([[[1]], [[2]], [[3]]]) + y = np.array([[[40], [50]]]) + z = np.array([[[700, 800]]]) + + b = list(np.broadcast(x, y, z)) + + assert b == [(1, 40, 700), (1, 40, 800), (1, 50, 700), (1, 50, 800), + (2, 40, 700), (2, 40, 800), (2, 50, 700), (2, 50, 800), + (3, 40, 700), (3, 40, 800), (3, 50, 700), (3, 50, 800)] + + def test_number_of_arguments(self): + """ + Test from numpy unit tests. 
+ """ + import numpy as np + arr = np.empty((5,)) + for j in range(35): + arrs = [arr] * j + if j < 2 or j > 32: + exc = raises(ValueError, np.broadcast, *arrs) + assert exc.value[0] == ('Need at least two and fewer than (32) array objects.') + else: + mit = np.broadcast(*arrs) + assert mit.numiter == j From pypy.commits at gmail.com Sun Apr 17 10:06:49 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 17 Apr 2016 07:06:49 -0700 (PDT) Subject: [pypy-commit] pypy default: document merged branches Message-ID: <57139879.c49a1c0a.75623.6985@mx.google.com> Author: mattip Branch: Changeset: r83716:183c27109a2e Date: 2016-04-17 16:55 +0300 http://bitbucket.org/pypy/pypy/changeset/183c27109a2e/ Log: document merged branches diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,3 +5,12 @@ .. this is a revision shortly after release-5.1 .. startrev: 2180e1eaf6f6 +.. branch: rposix-for-3 + +Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). +This updates the underlying rpython functions with the ones needed for the +py3k branch + +.. branch: numpy_broadcast + +Add broadcast to micronumpy From pypy.commits at gmail.com Sun Apr 17 10:06:43 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 17 Apr 2016 07:06:43 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast: merge default into branch Message-ID: <57139873.c31f1c0a.9aa65.166f@mx.google.com> Author: mattip Branch: numpy_broadcast Changeset: r83713:000e3811ce8f Date: 2016-04-17 16:36 +0300 http://bitbucket.org/pypy/pypy/changeset/000e3811ce8f/ Log: merge default into branch diff too long, truncating to 2000 out of 8346 lines diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. 
toctree:: + release-5.1.0.rst release-5.0.1.rst release-5.0.0.rst release-4.0.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-5.1.0.rst whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.1.0.rst @@ -0,0 +1,136 @@ +======== +PyPy 5.1 +======== + +We have released PyPy 5.1, about a month after PyPy 5.0. +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata, and we now fully support the IBM s390x +architecture. + +You can download the PyPy 5.1 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. 
+ +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s960x** running Linux + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 5.0 released in March 2015) +========================================================= + +* New features: + + * A new jit backend for the IBM s390x, which was a large effort over the past + few months. + + * Add better support for PyUnicodeObject in the C-API compatibility layer + + * Support GNU/kFreeBSD Debian ports in vmprof + + * Add __pypy__._promote + + * Make attrgetter a single type for CPython compatibility + +* Bug Fixes + + * Catch exceptions raised in an exit function + + * Fix a corner case in the JIT + + * Fix edge cases in the cpyext refcounting-compatible semantics + + * Try harder to not emit NEON instructions on ARM processors without NEON + support + + * Support glibc < 2.16 on ARM + + * Improve the rpython posix module system interaction function calls + + * Detect a missing class function implementation instead of calling a random + function + + * Check that PyTupleObjects do not contain any NULLs at the + point of conversion to W_TupleObjects + + * In ctypes, fix _anonymous_ fields of instances + + * Fix JIT issue with unpack() on a Trace which contains half-written operations + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * Implemented numpy.where for a single argument + + * Indexing by a numpy scalar now returns a scalar + + * Fix transpose(arg) when arg is a sequence + + * Refactor include file handling, now all numpy ndarray, ufunc, and umath + functions exported from 
libpypy.so are declared in pypy_numpy.h, which is + included only when building our fork of numpy + +* Performance improvements: + + * Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting + + * Merge another round of improvements to the warmup performance + + * Cleanup history rewriting in pyjitpl + + * Remove the forced minor collection that occurs when rewriting the + assembler at the start of the JIT backend + +* Internal refactorings: + + * Use a simpler logger to speed up translation + + * Drop vestiges of Python 2.5 support in testing + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -0,0 +1,62 @@ +========================= +What's new in PyPy 5.1 +========================= + +.. this is a revision shortly after release-5.0 +.. startrev: b238b48f9138 + +.. branch: s390x-backend + +The jit compiler backend implementation for the s390x architecutre. +The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. +It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. + +.. branch: remove-py-log + +Replace py.log with something simpler, which should speed up logging + +.. branch: where_1_arg + +Implemented numpy.where for 1 argument (thanks sergem) + +.. branch: fix_indexing_by_numpy_int + +Implement yet another strange numpy indexing compatibility; indexing by a scalar +returns a scalar + +.. branch: fix_transpose_for_list_v3 + +Allow arguments to transpose to be sequences + +.. 
branch: jit-leaner-frontend + +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation +of traces + +.. branch: win32-lib-name + +.. branch: remove-frame-forcing-in-executioncontext + +.. branch: rposix-for-3 + +Wrap more POSIX functions in `rpython.rlib.rposix`. + +.. branch: cleanup-history-rewriting + +A local clean-up in the JIT front-end. + +.. branch: jit-constptr-2 + +Remove the forced minor collection that occurs when rewriting the +assembler at the start of the JIT backend. This is done by emitting +the ConstPtrs in a separate table, and loading from the table. It +gives improved warm-up time and memory usage, and also removes +annoying special-purpose code for pinned pointers. + +.. branch: fix-jitlog + +.. branch: cleanup-includes + +Remove old uneeded numpy headers, what is left is only for testing. Also +generate pypy_numpy.h which exposes functions to directly use micronumpy +ndarray and ufuncs diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,37 +1,7 @@ ========================= -What's new in PyPy 5.0.+ +What's new in PyPy 5.1+ ========================= -.. this is a revision shortly after release-5.0 -.. startrev: b238b48f9138 +.. this is a revision shortly after release-5.1 +.. startrev: 2180e1eaf6f6 -.. branch: s390x-backend - -The jit compiler backend implementation for the s390x architecutre. -The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. -It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. - -.. branch: remove-py-log - -Replace py.log with something simpler, which should speed up logging - -.. branch: where_1_arg - -Implemented numpy.where for 1 argument (thanks sergem) - -.. 
branch: fix_indexing_by_numpy_int - -Implement yet another strange numpy indexing compatibility; indexing by a scalar -returns a scalar - -.. branch: fix_transpose_for_list_v3 - -Allow arguments to transpose to be sequences - -.. branch: jit-leaner-frontend - -Improve the tracing speed in the frontend as well as heapcache by using a more compact representation -of traces - -.. branch: win32-lib-name - diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1029,6 +1029,9 @@ def newlist_int(self, list_i): return self.newlist([self.wrap(i) for i in list_i]) + def newlist_float(self, list_f): + return self.newlist([self.wrap(f) for f in list_f]) + def newlist_hint(self, sizehint): from pypy.objspace.std.listobject import make_empty_list_with_size return make_empty_list_with_size(self, sizehint) diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -277,9 +277,18 @@ raise NotImplementedError def get_traceback(self): - """Get the PyTraceback object, for app-level Python code. + """Calling this marks the PyTraceback as escaped, i.e. it becomes + accessible and inspectable by app-level Python code. For the JIT. + Note that this has no effect if there are already several traceback + frames recorded, because in this case they are already marked as + escaping by executioncontext.leave() being called with + got_exception=True. 
""" - return self._application_traceback + from pypy.interpreter.pytraceback import PyTraceback + tb = self._application_traceback + if tb is not None and isinstance(tb, PyTraceback): + tb.frame.mark_as_escaped() + return tb def set_traceback(self, traceback): """Set the current traceback.""" diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -74,6 +74,15 @@ finally: frame_vref = self.topframeref self.topframeref = frame.f_backref + if frame.escaped or got_exception: + # if this frame escaped to applevel, we must ensure that also + # f_back does + f_back = frame.f_backref() + if f_back: + f_back.mark_as_escaped() + # force the frame (from the JIT point of view), so that it can + # be accessed also later + frame_vref() jit.virtual_ref_finish(frame_vref, frame) # ________________________________________________________________ diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -65,6 +65,7 @@ last_exception = None f_backref = jit.vref_None + escaped = False # see mark_as_escaped() debugdata = None pycode = None # code object executed by that frame @@ -151,6 +152,15 @@ assert isinstance(cell, Cell) return cell + def mark_as_escaped(self): + """ + Must be called on frames that are exposed to applevel, e.g. by + sys._getframe(). This ensures that the virtualref holding the frame + is properly forced by ec.leave(), and thus the frame will be still + accessible even after the corresponding C stack died. 
+ """ + self.escaped = True + def append_block(self, block): assert block.previous is self.lastblock self.lastblock = block diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -15,9 +15,9 @@ # ____________________________________________________________ -def sorted(lst, cmp=None, key=None, reverse=False): +def sorted(iterable, cmp=None, key=None, reverse=False): "sorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list" - sorted_lst = list(lst) + sorted_lst = list(iterable) sorted_lst.sort(cmp, key, reverse) return sorted_lst diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -172,7 +172,7 @@ def _promote(space, w_obj): """ Promote the first argument of the function and return it. Promote is by value for ints, floats, strs, unicodes (but not subclasses thereof) and by - reference otherwise. + reference otherwise. (Unicodes not supported right now.) 
This function is experimental!""" from rpython.rlib import jit @@ -181,9 +181,10 @@ elif space.is_w(space.type(w_obj), space.w_float): jit.promote(space.float_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_str): - jit.promote(space.str_w(w_obj)) + jit.promote_string(space.str_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_unicode): - jit.promote(space.unicode_w(w_obj)) + raise OperationError(space.w_TypeError, space.wrap( + "promoting unicode unsupported")) else: jit.promote(w_obj) return w_obj diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -53,7 +53,7 @@ assert _promote(1) == 1 assert _promote(1.1) == 1.1 assert _promote("abc") == "abc" - assert _promote(u"abc") == u"abc" + raises(TypeError, _promote, u"abc") l = [] assert _promote(l) is l class A(object): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -48,6 +48,7 @@ 'from_buffer': 'func.from_buffer', 'string': 'func.string', + 'unpack': 'func.unpack', 'buffer': 'cbuffer.buffer', 'memmove': 'func.memmove', diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -323,14 +323,18 @@ from pypy.module._cffi_backend import ctypearray ctype = self.ctype if isinstance(ctype, ctypearray.W_CTypeArray): - return ctype.ctitem.unpack_list_of_int_items(self) + length = self.get_array_length() + with self as ptr: + return ctype.ctitem.unpack_list_of_int_items(ptr, length) return None def unpackiterable_float(self, space): from pypy.module._cffi_backend import ctypearray ctype = self.ctype if isinstance(ctype, ctypearray.W_CTypeArray): - return ctype.ctitem.unpack_list_of_float_items(self) + length = 
self.get_array_length() + with self as ptr: + return ctype.ctitem.unpack_list_of_float_items(ptr, length) return None @specialize.argtype(1) @@ -367,6 +371,25 @@ with self as ptr: return W_CDataGCP(self.space, ptr, self.ctype, self, w_destructor) + def unpack(self, length): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + space = self.space + if not self.ctype.is_nonfunc_pointer_or_array: + raise oefmt(space.w_TypeError, + "expected a pointer or array, got '%s'", + self.ctype.name) + if length < 0: + raise oefmt(space.w_ValueError, "'length' cannot be negative") + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + with self as ptr: + if not ptr: + raise oefmt(space.w_RuntimeError, + "cannot use unpack() on %s", + space.str_w(self.repr())) + w_result = ctype.ctitem.unpack_ptr(ctype, ptr, length) + return w_result + class W_CDataMem(W_CData): """This is used only by the results of cffi.cast('int', x) diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -7,11 +7,12 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import ovfcheck from pypy.module._cffi_backend import cdataobj from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray +from pypy.module._cffi_backend import ctypeprim class W_CTypeArray(W_CTypePtrOrArray): diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -49,10 +49,10 @@ def is_unichar_ptr_or_array(self): return False - def unpack_list_of_int_items(self, cdata): + def unpack_list_of_int_items(self, ptr, length): return None - def unpack_list_of_float_items(self, cdata): + def 
unpack_list_of_float_items(self, ptr, length): return None def pack_list_of_items(self, cdata, w_ob): @@ -127,6 +127,21 @@ raise oefmt(space.w_TypeError, "string(): unexpected cdata '%s' argument", self.name) + def unpack_ptr(self, w_ctypeptr, ptr, length): + # generic implementation, when the type of items is not known to + # be one for which a fast-case exists + space = self.space + itemsize = self.size + if itemsize < 0: + raise oefmt(space.w_ValueError, + "'%s' points to items of unknown size", + w_ctypeptr.name) + result_w = [None] * length + for i in range(length): + result_w[i] = self.convert_to_object(ptr) + ptr = rffi.ptradd(ptr, itemsize) + return space.newlist(result_w) + def add(self, cdata, i): space = self.space raise oefmt(space.w_TypeError, "cannot add a cdata '%s' and a number", diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -87,6 +87,13 @@ return self.space.wrap(s) return W_CType.string(self, cdataobj, maxlen) + def unpack_ptr(self, w_ctypeptr, ptr, length): + result = self.unpack_list_of_int_items(ptr, length) + if result is not None: + return self.space.newlist_int(result) + return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length) + + class W_CTypePrimitiveCharOrUniChar(W_CTypePrimitive): _attrs_ = [] is_primitive_integer = True @@ -125,6 +132,10 @@ value = self._convert_to_char(w_ob) cdata[0] = value + def unpack_ptr(self, w_ctypeptr, ptr, length): + s = rffi.charpsize2str(ptr, length) + return self.space.wrapbytes(s) + # XXX explicitly use an integer type instead of lltype.UniChar here, # because for now the latter is defined as unsigned by RPython (even @@ -171,6 +182,10 @@ value = self._convert_to_unichar(w_ob) rffi.cast(rffi.CWCHARP, cdata)[0] = value + def unpack_ptr(self, w_ctypeptr, ptr, length): + u = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, ptr), length) + return self.space.wrap(u) + class 
W_CTypePrimitiveSigned(W_CTypePrimitive): _attrs_ = ['value_fits_long', 'value_smaller_than_long'] @@ -221,19 +236,16 @@ def write_raw_integer_data(self, w_cdata, value): w_cdata.write_raw_signed_data(value) - def unpack_list_of_int_items(self, w_cdata): + def unpack_list_of_int_items(self, ptr, length): if self.size == rffi.sizeof(rffi.LONG): from rpython.rlib.rrawarray import populate_list_from_raw_array res = [] - length = w_cdata.get_array_length() - with w_cdata as ptr: - buf = rffi.cast(rffi.LONGP, ptr) - populate_list_from_raw_array(res, buf, length) + buf = rffi.cast(rffi.LONGP, ptr) + populate_list_from_raw_array(res, buf, length) return res elif self.value_smaller_than_long: - res = [0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_list_from_raw_array(res, ptr, self.size) + res = [0] * length + misc.unpack_list_from_raw_array(res, ptr, self.size) return res return None @@ -313,11 +325,10 @@ def write_raw_integer_data(self, w_cdata, value): w_cdata.write_raw_unsigned_data(value) - def unpack_list_of_int_items(self, w_cdata): + def unpack_list_of_int_items(self, ptr, length): if self.value_fits_long: - res = [0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_unsigned_list_from_raw_array(res, ptr, self.size) + res = [0] * length + misc.unpack_unsigned_list_from_raw_array(res, ptr, self.size) return res return None @@ -391,19 +402,16 @@ value = space.float_w(space.float(w_ob)) misc.write_raw_float_data(cdata, value, self.size) - def unpack_list_of_float_items(self, w_cdata): + def unpack_list_of_float_items(self, ptr, length): if self.size == rffi.sizeof(rffi.DOUBLE): from rpython.rlib.rrawarray import populate_list_from_raw_array res = [] - length = w_cdata.get_array_length() - with w_cdata as ptr: - buf = rffi.cast(rffi.DOUBLEP, ptr) - populate_list_from_raw_array(res, buf, length) + buf = rffi.cast(rffi.DOUBLEP, ptr) + populate_list_from_raw_array(res, buf, length) return res elif self.size == rffi.sizeof(rffi.FLOAT): 
- res = [0.0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_cfloat_list_from_raw_array(res, ptr) + res = [0.0] * length + misc.unpack_cfloat_list_from_raw_array(res, ptr) return res return None @@ -421,6 +429,12 @@ return True return W_CTypePrimitive.pack_list_of_items(self, cdata, w_ob) + def unpack_ptr(self, w_ctypeptr, ptr, length): + result = self.unpack_list_of_float_items(ptr, length) + if result is not None: + return self.space.newlist_float(result) + return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length) + class W_CTypePrimitiveLongDouble(W_CTypePrimitiveFloat): _attrs_ = [] diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -542,6 +542,25 @@ return w_cdata.ctype.string(w_cdata, maxlen) + @unwrap_spec(w_cdata=W_CData, length=int) + def descr_unpack(self, w_cdata, length): + """Unpack an array of C data of the given length, +returning a Python string/unicode/list. + +If 'cdata' is a pointer to 'char', returns a byte string. +It does not stop at the first null. This is equivalent to: +ffi.buffer(cdata, length)[:] + +If 'cdata' is a pointer to 'wchar_t', returns a unicode string. +'length' is measured in wchar_t's; it is not the size in bytes. + +If 'cdata' is a pointer to anything else, returns a list of +'length' items. This is a faster equivalent to: +[cdata[i] for i in range(length)]""" + # + return w_cdata.unpack(length) + + def descr_sizeof(self, w_arg): """\ Return the size in bytes of the argument. 
@@ -739,4 +758,5 @@ sizeof = interp2app(W_FFIObject.descr_sizeof), string = interp2app(W_FFIObject.descr_string), typeof = interp2app(W_FFIObject.descr_typeof), + unpack = interp2app(W_FFIObject.descr_unpack), **_extras) diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -78,6 +78,12 @@ # ____________________________________________________________ + at unwrap_spec(w_cdata=cdataobj.W_CData, length=int) +def unpack(space, w_cdata, length): + return w_cdata.unpack(length) + +# ____________________________________________________________ + def _get_types(space): return space.newtuple([space.gettypefor(cdataobj.W_CData), space.gettypefor(ctypeobj.W_CType)]) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3514,3 +3514,72 @@ d = {} _get_common_types(d) assert d['bool'] == '_Bool' + +def test_unpack(): + BChar = new_primitive_type("char") + BArray = new_array_type(new_pointer_type(BChar), 10) # char[10] + p = newp(BArray, b"abc\x00def") + p0 = p + assert unpack(p, 10) == b"abc\x00def\x00\x00\x00" + assert unpack(p+1, 5) == b"bc\x00de" + BWChar = new_primitive_type("wchar_t") + BArray = new_array_type(new_pointer_type(BWChar), 10) # wchar_t[10] + p = newp(BArray, u"abc\x00def") + assert unpack(p, 10) == u"abc\x00def\x00\x00\x00" + + for typename, samples in [ + ("uint8_t", [0, 2**8-1]), + ("uint16_t", [0, 2**16-1]), + ("uint32_t", [0, 2**32-1]), + ("uint64_t", [0, 2**64-1]), + ("int8_t", [-2**7, 2**7-1]), + ("int16_t", [-2**15, 2**15-1]), + ("int32_t", [-2**31, 2**31-1]), + ("int64_t", [-2**63, 2**63-1]), + ("_Bool", [0, 1]), + ("float", [0.0, 10.5]), + ("double", [12.34, 56.78]), + ]: + BItem = new_primitive_type(typename) + BArray = new_array_type(new_pointer_type(BItem), 10) + 
p = newp(BArray, samples) + result = unpack(p, len(samples)) + assert result == samples + for i in range(len(samples)): + assert result[i] == p[i] and type(result[i]) is type(p[i]) + # + BInt = new_primitive_type("int") + py.test.raises(TypeError, unpack, p) + py.test.raises(TypeError, unpack, b"foobar", 6) + py.test.raises(TypeError, unpack, cast(BInt, 42), 1) + # + BPtr = new_pointer_type(BInt) + random_ptr = cast(BPtr, -424344) + other_ptr = cast(BPtr, 54321) + BArray = new_array_type(new_pointer_type(BPtr), None) + lst = unpack(newp(BArray, [random_ptr, other_ptr]), 2) + assert lst == [random_ptr, other_ptr] + # + BFunc = new_function_type((BInt, BInt), BInt, False) + BFuncPtr = new_pointer_type(BFunc) + lst = unpack(newp(new_array_type(BFuncPtr, None), 2), 2) + assert len(lst) == 2 + assert not lst[0] and not lst[1] + assert typeof(lst[0]) is BFunc + # + BStruct = new_struct_type("foo") + BStructPtr = new_pointer_type(BStruct) + e = py.test.raises(ValueError, unpack, cast(BStructPtr, 42), 5) + assert str(e.value) == "'foo *' points to items of unknown size" + complete_struct_or_union(BStruct, [('a1', BInt, -1), + ('a2', BInt, -1)]) + array_of_structs = newp(new_array_type(BStructPtr, None), [[4,5], [6,7]]) + lst = unpack(array_of_structs, 2) + assert typeof(lst[0]) is BStruct + assert lst[0].a1 == 4 and lst[1].a2 == 7 + # + py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 0) + py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 10) + # + py.test.raises(ValueError, unpack, p0, -1) + py.test.raises(ValueError, unpack, p, -1) diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -476,3 +476,11 @@ for i in range(5): raises(ValueError, ffi.init_once, do_init, "tag") assert seen == [1] * (i + 1) + + def test_unpack(self): + import _cffi_backend as _cffi1_backend + 
ffi = _cffi1_backend.FFI() + p = ffi.new("char[]", b"abc\x00def") + assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" + p = ffi.new("int[]", [-123456789]) + assert ffi.unpack(p, 1) == [-123456789] diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -285,6 +285,8 @@ from posix import openpty, fdopen, write, close except ImportError: skip('no openpty on this platform') + if 'gnukfreebsd' in sys.platform: + skip('close() hangs forever on kFreeBSD') read_fd, write_fd = openpty() write(write_fd, 'Abc\n') close(write_fd) diff --git a/pypy/module/_multiprocessing/test/test_win32.py b/pypy/module/_multiprocessing/test/test_win32.py --- a/pypy/module/_multiprocessing/test/test_win32.py +++ b/pypy/module/_multiprocessing/test/test_win32.py @@ -2,7 +2,8 @@ import sys class AppTestWin32: - spaceconfig = dict(usemodules=('_multiprocessing',)) + spaceconfig = dict(usemodules=('_multiprocessing', + 'signal', '_rawffi', 'binascii')) def setup_class(cls): if sys.platform != "win32": diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -733,6 +733,7 @@ try: while 1: count += cli.send(b'foobar' * 70) + assert count < 100000 except timeout: pass t.recv(count) diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -14,7 +14,7 @@ tmpfile2 = open(self.tmpfilename2, 'wb') tmpfileno2 = tmpfile2.fileno() - import struct, sys + import struct, sys, gc WORD = struct.calcsize('l') @@ -46,6 +46,8 @@ return count import _vmprof + gc.collect() # try to make the weakref list deterministic + gc.collect() # by freeing all dead code objects _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = 
open(self.tmpfilename, 'rb').read() @@ -57,6 +59,8 @@ pass """ in d + gc.collect() + gc.collect() _vmprof.enable(tmpfileno2, 0.01) exec """def foo2(): diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -556,18 +556,18 @@ self.w_class = None self.method = method - if self.canoverflow: - assert self.bytes <= rffi.sizeof(rffi.ULONG) - if self.bytes == rffi.sizeof(rffi.ULONG) and not signed and \ - self.unwrap == 'int_w': - # Treat this type as a ULONG - self.unwrap = 'bigint_w' - self.canoverflow = False - def _freeze_(self): # hint for the annotator: track individual constant instances return True +if rffi.sizeof(rffi.UINT) == rffi.sizeof(rffi.ULONG): + # 32 bits: UINT can't safely overflow into a C long (rpython int) + # via int_w, handle it like ULONG below + _UINTTypeCode = \ + TypeCode(rffi.UINT, 'bigint_w') +else: + _UINTTypeCode = \ + TypeCode(rffi.UINT, 'int_w', True) types = { 'c': TypeCode(lltype.Char, 'str_w', method=''), 'u': TypeCode(lltype.UniChar, 'unicode_w', method=''), @@ -576,7 +576,7 @@ 'h': TypeCode(rffi.SHORT, 'int_w', True, True), 'H': TypeCode(rffi.USHORT, 'int_w', True), 'i': TypeCode(rffi.INT, 'int_w', True, True), - 'I': TypeCode(rffi.UINT, 'int_w', True), + 'I': _UINTTypeCode, 'l': TypeCode(rffi.LONG, 'int_w', True, True), 'L': TypeCode(rffi.ULONG, 'bigint_w'), # Overflow handled by # rbigint.touint() which diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -99,12 +99,13 @@ for tc in 'BHIL': a = self.array(tc) - vals = [0, 2 ** a.itemsize - 1] + itembits = a.itemsize * 8 + vals = [0, 2 ** itembits - 1] a.fromlist(vals) assert a.tolist() == vals a = self.array(tc.lower()) - vals = [-1 * (2 ** a.itemsize) / 2, (2 ** a.itemsize) / 2 - 1] + vals = [-1 * (2 ** itembits) / 2, (2 ** itembits) / 2 - 1] 
a.fromlist(vals) assert a.tolist() == vals diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -144,26 +144,14 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir, copy_numpy_headers): +def copy_header_files(dstdir): # XXX: 20 lines of code to recursively copy a directory, really?? assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') - for name in ("pypy_decl.h", "pypy_macros.h", "pypy_structmember_decl.h"): + for name in ["pypy_macros.h"] + FUNCTIONS_BY_HEADER.keys(): headers.append(udir.join(name)) _copy_header_files(headers, dstdir) - if copy_numpy_headers: - try: - dstdir.mkdir('numpy') - except py.error.EEXIST: - pass - numpy_dstdir = dstdir / 'numpy' - - numpy_include_dir = include_dir / 'numpy' - numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') - _copy_header_files(numpy_headers, numpy_dstdir) - - class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -231,7 +219,8 @@ wrapper.c_name = cpyext_namespace.uniquename(self.c_name) return wrapper -def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header='pypy_decl.h', +DEFAULT_HEADER = 'pypy_decl.h' +def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER, gil=None, result_borrowed=False): """ Declares a function to be exported. 
@@ -265,6 +254,8 @@ func_name = func.func_name if header is not None: c_name = None + assert func_name not in FUNCTIONS, ( + "%s already registered" % func_name) else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, @@ -272,10 +263,6 @@ result_borrowed=result_borrowed) func.api_func = api_function - if header is not None: - assert func_name not in FUNCTIONS, ( - "%s already registered" % func_name) - if error is _NOT_SPECIFIED: raise ValueError("function %s has no return value for exceptions" % func) @@ -363,7 +350,8 @@ unwrapper_catch = make_unwrapper(True) unwrapper_raise = make_unwrapper(False) if header is not None: - FUNCTIONS[func_name] = api_function + if header == DEFAULT_HEADER: + FUNCTIONS[func_name] = api_function FUNCTIONS_BY_HEADER.setdefault(header, {})[func_name] = api_function INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests return unwrapper_raise # used in 'normal' RPython code. @@ -792,10 +780,11 @@ # Structure declaration code members = [] structindex = {} - for name, func in sorted(FUNCTIONS.iteritems()): - restype, args = c_function_signature(db, func) - members.append('%s (*%s)(%s);' % (restype, name, args)) - structindex[name] = len(structindex) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + restype, args = c_function_signature(db, func) + members.append('%s (*%s)(%s);' % (restype, name, args)) + structindex[name] = len(structindex) structmembers = '\n'.join(members) struct_declaration_code = """\ struct PyPyAPI { @@ -804,7 +793,8 @@ RPY_EXTERN struct PyPyAPI* pypyAPI = &_pypyAPI; """ % dict(members=structmembers) - functions = generate_decls_and_callbacks(db, export_symbols) + functions = generate_decls_and_callbacks(db, export_symbols, + prefix='cpyexttest') global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): @@ -821,6 +811,11 @@ prologue = ("#include \n" "#include \n" "#include \n") + if use_micronumpy: + prologue 
= ("#include \n" + "#include \n" + "#include \n" + "#include \n") code = (prologue + struct_declaration_code + global_code + @@ -896,13 +891,19 @@ pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') # implement structure initialization code - for name, func in FUNCTIONS.iteritems(): - if name.startswith('cpyext_'): # XXX hack - continue - pypyAPI[structindex[name]] = ctypes.cast( - ll2ctypes.lltype2ctypes(func.get_llhelper(space)), - ctypes.c_void_p) - + #for name, func in FUNCTIONS.iteritems(): + # if name.startswith('cpyext_'): # XXX hack + # continue + # pypyAPI[structindex[name]] = ctypes.cast( + # ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + # ctypes.c_void_p) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + if name.startswith('cpyext_'): # XXX hack + continue + pypyAPI[structindex[name]] = ctypes.cast( + ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + ctypes.c_void_p) setup_va_functions(eci) setup_init_functions(eci, translating=False) @@ -995,18 +996,12 @@ pypy_macros_h = udir.join('pypy_macros.h') pypy_macros_h.write('\n'.join(pypy_macros)) -def generate_decls_and_callbacks(db, export_symbols, api_struct=True): +def generate_decls_and_callbacks(db, export_symbols, api_struct=True, prefix=''): "NOT_RPYTHON" # implement function callbacks and generate function decls functions = [] decls = {} pypy_decls = decls['pypy_decl.h'] = [] - pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#ifndef PYPY_STANDALONE\n") - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("extern \"C\" {") - pypy_decls.append("#endif\n") pypy_decls.append('#define Signed long /* xxx temporary fix */\n') pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') @@ -1016,19 +1011,28 @@ for header_name, header_functions in FUNCTIONS_BY_HEADER.iteritems(): if header_name not in decls: header = 
decls[header_name] = [] + header.append('#define Signed long /* xxx temporary fix */\n') + header.append('#define Unsigned unsigned long /* xxx temporary fix */\n') else: header = decls[header_name] for name, func in sorted(header_functions.iteritems()): + if header == DEFAULT_HEADER: + _name = name + else: + # this name is not included in pypy_macros.h + _name = mangle_name(prefix, name) + assert _name is not None, 'error converting %s' % name + header.append("#define %s %s" % (name, _name)) restype, args = c_function_signature(db, func) - header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, name, args)) + header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, _name, args)) if api_struct: callargs = ', '.join('arg%d' % (i,) for i in range(len(func.argtypes))) if func.restype is lltype.Void: - body = "{ _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ _pypyAPI.%s(%s); }" % (_name, callargs) else: - body = "{ return _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ return _pypyAPI.%s(%s); }" % (_name, callargs) functions.append('%s %s(%s)\n%s' % (restype, name, args, body)) for name in VA_TP_LIST: name_no_star = process_va_name(name) @@ -1045,13 +1049,10 @@ typ = 'PyObject*' pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) - pypy_decls.append('#undef Signed /* xxx temporary fix */\n') - pypy_decls.append('#undef Unsigned /* xxx temporary fix */\n') - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("}") - pypy_decls.append("#endif") - pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") - pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") + for header_name in FUNCTIONS_BY_HEADER.keys(): + header = decls[header_name] + header.append('#undef Signed /* xxx temporary fix */\n') + header.append('#undef Unsigned /* xxx temporary fix */\n') for header_name, header_decls in decls.iteritems(): decl_h = udir.join(header_name) @@ -1158,7 +1159,8 @@ generate_macros(export_symbols, prefix='PyPy') - functions = generate_decls_and_callbacks(db, [], api_struct=False) + 
functions = generate_decls_and_callbacks(db, [], api_struct=False, + prefix='PyPy') code = "#include \n" + "\n".join(functions) eci = build_eci(False, export_symbols, code) @@ -1200,14 +1202,16 @@ PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **', getter_only=True, declare_as_extern=False) - for name, func in FUNCTIONS.iteritems(): - newname = mangle_name('PyPy', name) or name - deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) - deco(func.get_wrapper(space)) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, + relax=True) + deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include, use_micronumpy) + copy_header_files(trunk_include) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -132,7 +132,18 @@ /* Missing definitions */ #include "missing.h" -#include +/* The declarations of most API functions are generated in a separate file */ +/* Don't include them while building PyPy, RPython also generated signatures + * which are similar but not identical. */ +#ifndef PYPY_STANDALONE +#ifdef __cplusplus +extern "C" { +#endif + #include +#ifdef __cplusplus +} +#endif +#endif /* PYPY_STANDALONE */ /* Define macros for inline documentation. 
*/ #define PyDoc_VAR(name) static char name[] diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h deleted file mode 100644 --- a/pypy/module/cpyext/include/numpy/__multiarray_api.h +++ /dev/null @@ -1,10 +0,0 @@ - - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -#define import_array() -#define PyArray_New _PyArray_New - diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -1,8 +1,6 @@ -/* NDArray object interface - S. H. Muller, 2013/07/26 - * It will be copied by numpy/core/setup.py by install_data to - * site-packages/numpy/core/includes/numpy -*/ +/* NDArray object interface - S. H. Muller, 2013/07/26 */ +/* For testing ndarrayobject only */ #ifndef Py_NDARRAYOBJECT_H #define Py_NDARRAYOBJECT_H @@ -10,13 +8,8 @@ extern "C" { #endif -#include "old_defines.h" #include "npy_common.h" -#include "__multiarray_api.h" - -#define NPY_UNUSED(x) x -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#include "ndarraytypes.h" /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -24,208 +17,20 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_CopyInto _PyArray_CopyInto +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define NPY_MAXDIMS 32 -#ifndef NDARRAYTYPES_H -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -/* data types copied from numpy/ndarraytypes.h - * keep numbers in sync with micronumpy.interp_dtype.DTypeCache - */ -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, 
NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - /* - * New 1.6 types appended, may be integrated - * into the above in 2.0. - */ - NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, - - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR, /* special flag */ - NPY_USERDEF=256, /* leave room for characters */ - - /* The number of types not including the new 1.6 types */ - NPY_NTYPES_ABI_COMPATIBLE=21 -}; - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) -#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) -#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) -#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) - - -/* flags */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 -#define NPY_ARRAY_OWNDATA 0x0004 -#define NPY_ARRAY_FORCECAST 0x0010 -#define NPY_ARRAY_ENSURECOPY 0x0020 -#define NPY_ARRAY_ENSUREARRAY 0x0040 -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 -#define NPY_ARRAY_ALIGNED 0x0100 -#define NPY_ARRAY_NOTSWAPPED 0x0200 -#define NPY_ARRAY_WRITEABLE 0x0400 -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - 
NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -#define NPY_FARRAY NPY_ARRAY_FARRAY -#define NPY_CARRAY NPY_ARRAY_CARRAY - -#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) - -#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) - -#define PyArray_ISONESEGMENT(arr) (1) -#define PyArray_ISNOTSWAPPED(arr) (1) -#define PyArray_ISBYTESWAPPED(arr) (0) - -#endif - -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_FLOAT64 
NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CFLOAT -#define NPY_COMPLEX64 NPY_CDOUBLE - - -/* functions */ -#ifndef PyArray_NDIM - -#define PyArray_Check _PyArray_Check -#define PyArray_CheckExact _PyArray_CheckExact -#define PyArray_FLAGS _PyArray_FLAGS - -#define PyArray_NDIM _PyArray_NDIM -#define PyArray_DIM _PyArray_DIM -#define PyArray_STRIDE _PyArray_STRIDE -#define PyArray_SIZE _PyArray_SIZE -#define PyArray_ITEMSIZE _PyArray_ITEMSIZE -#define PyArray_NBYTES _PyArray_NBYTES -#define PyArray_TYPE _PyArray_TYPE -#define PyArray_DATA _PyArray_DATA - -#define PyArray_Size PyArray_SIZE -#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) - -#define PyArray_FromAny _PyArray_FromAny -#define PyArray_FromObject _PyArray_FromObject -#define PyArray_ContiguousFromObject PyArray_FromObject -#define PyArray_ContiguousFromAny PyArray_FromObject - -#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) -#define PyArray_FROM_OTF(obj, typenum, requirements) \ - PyArray_FromObject(obj, typenum, 0, 0) - -#define PyArray_New _PyArray_New -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData -#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning - -#define PyArray_EMPTY(nd, dims, type_num, fortran) \ - PyArray_SimpleNew(nd, dims, type_num) +/* functions defined in ndarrayobject.c*/ PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); -#define PyArray_FILLWBYTE _PyArray_FILLWBYTE -#define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto -#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) - -/* Don't use these in loops! 
*/ - -#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0))) - -#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1))) - -#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ - (k)*PyArray_STRIDE(obj,2))) - -#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ - (k)*PyArray_STRIDE(obj,2) + \ - (l)*PyArray_STRIDE(obj,3))) - -#endif #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h --- a/pypy/module/cpyext/include/numpy/ndarraytypes.h +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -1,69 +1,9 @@ #ifndef NDARRAYTYPES_H #define NDARRAYTYPES_H +/* For testing ndarrayobject only */ + #include "numpy/npy_common.h" -//#include "npy_endian.h" -//#include "npy_cpu.h" -//#include "utils.h" - -//for pypy - numpy has lots of typedefs -//for pypy - make life easier, less backward support -#define NPY_1_8_API_VERSION 0x00000008 -#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION -#undef NPY_1_8_API_VERSION - -#define NPY_ENABLE_SEPARATE_COMPILATION 1 -#define NPY_VISIBILITY_HIDDEN - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -#else - #define NPY_NO_EXPORT static -#endif - -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP - #define NPY_ALLOW_THREADS 1 -#else - #define NPY_ALLOW_THREADS 0 -#endif - - - -/* - * There are several places in the code where an array of dimensions - * is allocated statically. This is the size of that static - * allocation. 
- * - * The array creation itself could have arbitrary dimensions but all - * the places where static allocation is used would need to be changed - * to dynamic (including inside of several structures) - */ - -#define NPY_MAXDIMS 32 -#define NPY_MAXARGS 32 - -/* Used for Converter Functions "O&" code in ParseTuple */ -#define NPY_FAIL 0 -#define NPY_SUCCEED 1 - -/* - * Binary compatibility version number. This number is increased - * whenever the C-API is changed such that binary compatibility is - * broken, i.e. whenever a recompile of extension modules is needed. - */ -#define NPY_VERSION NPY_ABI_VERSION - -/* - * Minor API version. This number is increased whenever a change is - * made to the C-API -- whether it breaks binary compatibility or not. - * Some changes, such as adding a function pointer to the end of the - * function table, can be made without breaking binary compatibility. - * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) - * would be increased. Whenever binary compatibility is broken, both - * NPY_VERSION and NPY_FEATURE_VERSION should be increased. 
- */ -#define NPY_FEATURE_VERSION NPY_API_VERSION enum NPY_TYPES { NPY_BOOL=0, NPY_BYTE, NPY_UBYTE, @@ -91,18 +31,6 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; -/* basetype array priority */ -#define NPY_PRIORITY 0.0 - -/* default subtype priority */ -#define NPY_SUBTYPE_PRIORITY 1.0 - -/* default scalar priority */ -#define NPY_SCALAR_PRIORITY -1000000.0 - -/* How many floating point types are there (excluding half) */ -#define NPY_NUM_FLOATTYPE 3 - /* * These characters correspond to the array type and the struct * module @@ -157,27 +85,6 @@ }; typedef enum { - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2 -} NPY_SORTKIND; -#define NPY_NSORTS (NPY_MERGESORT + 1) - - -typedef enum { - NPY_INTROSELECT=0, -} NPY_SELECTKIND; -#define NPY_NSELECTS (NPY_INTROSELECT + 1) - - -typedef enum { - NPY_SEARCHLEFT=0, - NPY_SEARCHRIGHT=1 -} NPY_SEARCHSIDE; -#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) - - -typedef enum { NPY_NOSCALAR=-1, NPY_BOOL_SCALAR, NPY_INTPOS_SCALAR, @@ -186,7 +93,6 @@ NPY_COMPLEX_SCALAR, NPY_OBJECT_SCALAR } NPY_SCALARKIND; -#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) /* For specifying array memory layout or iteration order */ typedef enum { @@ -200,729 +106,6 @@ NPY_KEEPORDER=2 } NPY_ORDER; -/* For specifying allowed casting in operations which support it */ -typedef enum { - /* Only allow identical types */ - NPY_NO_CASTING=0, - /* Allow identical and byte swapped types */ - NPY_EQUIV_CASTING=1, - /* Only allow safe casts */ - NPY_SAFE_CASTING=2, - /* Allow safe casts or casts within the same kind */ - NPY_SAME_KIND_CASTING=3, - /* Allow any casts */ - NPY_UNSAFE_CASTING=4, - - /* - * Temporary internal definition only, will be removed in upcoming - * release, see below - * */ - NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, -} NPY_CASTING; - -typedef enum { - NPY_CLIP=0, - NPY_WRAP=1, - NPY_RAISE=2 -} NPY_CLIPMODE; - -/* The special not-a-time (NaT) value */ -#define NPY_DATETIME_NAT NPY_MIN_INT64 - -/* - * Upper bound on the length 
of a DATETIME ISO 8601 string - * YEAR: 21 (64-bit year) - * MONTH: 3 - * DAY: 3 - * HOURS: 3 - * MINUTES: 3 - * SECONDS: 3 - * ATTOSECONDS: 1 + 3*6 - * TIMEZONE: 5 - * NULL TERMINATOR: 1 - */ -#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) - -typedef enum { - NPY_FR_Y = 0, /* Years */ - NPY_FR_M = 1, /* Months */ - NPY_FR_W = 2, /* Weeks */ - /* Gap where 1.6 NPY_FR_B (value 3) was */ - NPY_FR_D = 4, /* Days */ - NPY_FR_h = 5, /* hours */ - NPY_FR_m = 6, /* minutes */ - NPY_FR_s = 7, /* seconds */ - NPY_FR_ms = 8, /* milliseconds */ - NPY_FR_us = 9, /* microseconds */ - NPY_FR_ns = 10,/* nanoseconds */ - NPY_FR_ps = 11,/* picoseconds */ - NPY_FR_fs = 12,/* femtoseconds */ - NPY_FR_as = 13,/* attoseconds */ - NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ -} NPY_DATETIMEUNIT; - -/* - * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS - * is technically one more than the actual number of units. - */ -#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) -#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC - -/* - * Business day conventions for mapping invalid business - * days to valid business days. - */ -typedef enum { - /* Go forward in time to the following business day. */ - NPY_BUSDAY_FORWARD, - NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, - /* Go backward in time to the preceding business day. */ - NPY_BUSDAY_BACKWARD, - NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, - /* - * Go forward in time to the following business day, unless it - * crosses a month boundary, in which case go backward - */ - NPY_BUSDAY_MODIFIEDFOLLOWING, - /* - * Go backward in time to the preceding business day, unless it - * crosses a month boundary, in which case go forward. - */ - NPY_BUSDAY_MODIFIEDPRECEDING, - /* Produce a NaT for non-business days. */ - NPY_BUSDAY_NAT, - /* Raise an exception for non-business days. 
*/ - NPY_BUSDAY_RAISE -} NPY_BUSDAY_ROLL; - -/************************************************************ - * NumPy Auxiliary Data for inner loops, sort functions, etc. - ************************************************************/ - -/* - * When creating an auxiliary data struct, this should always appear - * as the first member, like this: - * - * typedef struct { - * NpyAuxData base; - * double constant; - * } constant_multiplier_aux_data; - */ -typedef struct NpyAuxData_tag NpyAuxData; - -/* Function pointers for freeing or cloning auxiliary data */ -typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); -typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); - -struct NpyAuxData_tag { - NpyAuxData_FreeFunc *free; - NpyAuxData_CloneFunc *clone; - /* To allow for a bit of expansion without breaking the ABI */ - void *reserved[2]; -}; - -/* Macros to use for freeing and cloning auxiliary data */ -#define NPY_AUXDATA_FREE(auxdata) \ - do { \ - if ((auxdata) != NULL) { \ - (auxdata)->free(auxdata); \ - } \ - } while(0) -#define NPY_AUXDATA_CLONE(auxdata) \ - ((auxdata)->clone(auxdata)) - -#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); -#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); - -#define NPY_STRINGIFY(x) #x -#define NPY_TOSTRING(x) NPY_STRINGIFY(x) - - /* - * Macros to define how array, and dimension/strides data is - * allocated. 
- */ - - /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ - -#define NPY_USE_PYMEM 1 - -#if NPY_USE_PYMEM == 1 -#define PyArray_malloc PyMem_Malloc -#define PyArray_free PyMem_Free -#define PyArray_realloc PyMem_Realloc -#else -#define PyArray_malloc malloc -#define PyArray_free free -#define PyArray_realloc realloc -#endif - -/* Dimensions and strides */ -#define PyDimMem_NEW(size) \ - ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) - -#define PyDimMem_FREE(ptr) PyArray_free(ptr) - -#define PyDimMem_RENEW(ptr,size) \ - ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) - -/* forward declaration */ -struct _PyArray_Descr; - -/* These must deal with unaligned and swapped data if necessary */ -typedef PyObject * (PyArray_GetItemFunc) (void *, void *); -typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); - -typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, - npy_intp, int, void *); - -typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); -typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); - - -/* - * These assume aligned and notswapped data -- a buffer will be used - * before or contiguous data will be obtained - */ - -typedef int (PyArray_CompareFunc)(const void *, const void *, void *); -typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); - -typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, - npy_intp, void *); - -typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, - void *); - -/* - * XXX the ignore argument should be removed next time the API version - * is bumped. It used to be the separator. 
- */ -typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, - char *ignore, struct _PyArray_Descr *); -typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, - struct _PyArray_Descr *); - -typedef int (PyArray_FillFunc)(void *, npy_intp, void *); - -typedef int (PyArray_SortFunc)(void *, npy_intp, void *); -typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); -typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); -typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, - npy_intp *, npy_intp *, - void *); - -typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); - -typedef int (PyArray_ScalarKindFunc)(void *); - -typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, - void *max, void *out); -typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, - void *values, npy_intp nv); -typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, - npy_intp nindarray, npy_intp n_outer, - npy_intp m_middle, npy_intp nelem, - NPY_CLIPMODE clipmode); - -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -typedef struct { - /* - * Functions to cast to most other standard types - * Can have some NULL entries. The types - * DATETIME, TIMEDELTA, and HALF go into the castdict - * even though they are built-in. - */ - PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; - - /* The next four functions *cannot* be NULL */ - - /* - * Functions to get and set items with standard Python types - * -- not array scalars - */ - PyArray_GetItemFunc *getitem; - PyArray_SetItemFunc *setitem; - - /* - * Copy and/or swap data. 
Memory areas may not overlap - * Use memmove first if they might - */ - PyArray_CopySwapNFunc *copyswapn; - PyArray_CopySwapFunc *copyswap; - - /* - * Function to compare items - * Can be NULL - */ - PyArray_CompareFunc *compare; - - /* - * Function to select largest - * Can be NULL - */ - PyArray_ArgFunc *argmax; - - /* - * Function to compute dot product - * Can be NULL - */ - PyArray_DotFunc *dotfunc; - - /* - * Function to scan an ASCII file and - * place a single value plus possible separator - * Can be NULL - */ - PyArray_ScanFunc *scanfunc; - - /* - * Function to read a single value from a string - * and adjust the pointer; Can be NULL - */ - PyArray_FromStrFunc *fromstr; - - /* - * Function to determine if data is zero or not - * If NULL a default version is - * used at Registration time. - */ - PyArray_NonzeroFunc *nonzero; - - /* - * Used for arange. - * Can be NULL. - */ - PyArray_FillFunc *fill; - - /* - * Function to fill arrays with scalar values - * Can be NULL - */ - PyArray_FillWithScalarFunc *fillwithscalar; - - /* - * Sorting functions - * Can be NULL - */ - PyArray_SortFunc *sort[NPY_NSORTS]; - PyArray_ArgSortFunc *argsort[NPY_NSORTS]; - - /* - * Dictionary of additional casting functions - * PyArray_VectorUnaryFuncs - * which can be populated to support casting - * to other registered types. Can be NULL - */ - PyObject *castdict; - - /* - * Functions useful for generalizing - * the casting rules. - * Can be NULL; - */ - PyArray_ScalarKindFunc *scalarkind; - int **cancastscalarkindto; - int *cancastto; - - PyArray_FastClipFunc *fastclip; - PyArray_FastPutmaskFunc *fastputmask; - PyArray_FastTakeFunc *fasttake; - - /* - * Function to select smallest - * Can be NULL - */ - PyArray_ArgFunc *argmin; - -} PyArray_ArrFuncs; - -/* The item must be reference counted when it is inserted or extracted. 
*/ -#define NPY_ITEM_REFCOUNT 0x01 -/* Same as needing REFCOUNT */ -#define NPY_ITEM_HASOBJECT 0x01 -/* Convert to list for pickling */ -#define NPY_LIST_PICKLE 0x02 -/* The item is a POINTER */ -#define NPY_ITEM_IS_POINTER 0x04 -/* memory needs to be initialized for this data-type */ -#define NPY_NEEDS_INIT 0x08 -/* operations need Python C-API so don't give-up thread. */ -#define NPY_NEEDS_PYAPI 0x10 -/* Use f.getitem when extracting elements of this data-type */ -#define NPY_USE_GETITEM 0x20 -/* Use f.setitem when setting creating 0-d array from this data-type.*/ -#define NPY_USE_SETITEM 0x40 -/* A sticky flag specifically for structured arrays */ -#define NPY_ALIGNED_STRUCT 0x80 - -/* - *These are inherited for global data-type if any data-types in the - * field have them - */ -#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ - NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) - -#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ - NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ - NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) - -#define PyDataType_FLAGCHK(dtype, flag) \ - (((dtype)->flags & (flag)) == (flag)) - -#define PyDataType_REFCHK(dtype) \ - PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) - -typedef struct _PyArray_Descr { - PyObject_HEAD - /* - * the type object representing an - * instance of this type -- should not - * be two type_numbers with the same type - * object. - */ - PyTypeObject *typeobj; - /* kind for this type */ - char kind; - /* unique-character representing this type */ - char type; - /* - * '>' (big), '<' (little), '|' - * (not-applicable), or '=' (native). 
- */ - char byteorder; - /* flags describing data type */ - char flags; - /* number representing this type */ - int type_num; - /* element size (itemsize) for this type */ - int elsize; - /* alignment needed for this type */ - int alignment; - /* - * Non-NULL if this type is - * is an array (C-contiguous) - * of some other type - */ - struct _arr_descr *subarray; - /* - * The fields dictionary for this type - * For statically defined descr this - * is always Py_None - */ - PyObject *fields; - /* - * An ordered tuple of field names or NULL - * if no fields are defined - */ - PyObject *names; - /* - * a table of functions specific for each - * basic data descriptor - */ - PyArray_ArrFuncs *f; - /* Metadata about this dtype */ - PyObject *metadata; - /* - * Metadata specific to the C implementation - * of the particular dtype. This was added - * for NumPy 1.7.0. - */ - NpyAuxData *c_metadata; -} PyArray_Descr; - -typedef struct _arr_descr { - PyArray_Descr *base; From pypy.commits at gmail.com Sun Apr 17 10:06:45 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 17 Apr 2016 07:06:45 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast: close branch to be merged Message-ID: <57139875.623ec20a.f5e74.2654@mx.google.com> Author: mattip Branch: numpy_broadcast Changeset: r83714:93a93176ff67 Date: 2016-04-17 16:36 +0300 http://bitbucket.org/pypy/pypy/changeset/93a93176ff67/ Log: close branch to be merged From pypy.commits at gmail.com Sun Apr 17 10:21:18 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 17 Apr 2016 07:21:18 -0700 (PDT) Subject: [pypy-commit] pypy default: update contributors Message-ID: <57139bde.c653c20a.3963e.5adc@mx.google.com> Author: mattip Branch: Changeset: r83717:4dcdda0db30e Date: 2016-04-17 17:20 +0300 http://bitbucket.org/pypy/pypy/changeset/4dcdda0db30e/ Log: update contributors diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -111,23 +111,24 @@ Simon Burton Martin Matusiak Konstantin Lopuhin + Stefano Rivera Wenzhu 
Man John Witulski Laurence Tratt Ivan Sichmann Freitas Greg Price Dario Bertini - Stefano Rivera Mark Pearse Simon Cross + Edd Barrett Andreas Stührk - Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Spenser Bauman Jeremy Thurgood Paweł Piotr Przeradowski - Spenser Bauman + Tobias Pape Paul deGrandis Ilya Osadchiy marky1991 @@ -139,7 +140,7 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Tobias Pape + Mark Young Wanja Saatkamp Gerald Klix Mike Blume @@ -170,9 +171,9 @@ Yichao Yu Rocco Moretti Gintautas Miliauskas + Devin Jeanpierre Michael Twomey Lucian Branescu Mihaila - Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -183,6 +184,7 @@ Victor Stinner Andrews Medina anatoly techtonik + Sergey Matyunin Stuart Williams Jasper Schulz Christian Hudon @@ -217,7 +219,6 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan - Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -225,7 +226,9 @@ Pieter Zieschang Gabriel Lukas Vacek + Kunal Grover Andrew Dalke + Florin Papa Sylvain Thenault Jakub Stasiak Nathan Taylor @@ -240,7 +243,6 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner - Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -252,9 +254,11 @@ Artur Lisiecki Sergey Kishchenko Ignas Mikalajunas + Alecsandru Patrascu Christoph Gerum Martin Blais Lene Wagner + Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su Toni Mattis @@ -291,6 +295,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann + florinpapa Rafał Gałczyński Matt Bogosian Christian Muirhead @@ -305,6 +310,7 @@ Boglarka Vezer Chris Pressey Buck Golemon + Diana Popa Konrad Delong Dinu Gherman Chris Lambacher diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -81,13 +81,13 @@ Simon Burton Martin Matusiak Konstantin Lopuhin + Stefano Rivera Wenzhu Man John Witulski Laurence Tratt Ivan Sichmann Freitas Greg Price Dario Bertini - Stefano Rivera Mark Pearse Simon Cross Andreas 
Stührk @@ -95,9 +95,10 @@ Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Spenser Bauman Jeremy Thurgood Paweł Piotr Przeradowski - Spenser Bauman + Tobias Pape Paul deGrandis Ilya Osadchiy marky1991 @@ -109,7 +110,7 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Tobias Pape + Mark Young Wanja Saatkamp Gerald Klix Mike Blume @@ -140,9 +141,9 @@ Yichao Yu Rocco Moretti Gintautas Miliauskas + Devin Jeanpierre Michael Twomey Lucian Branescu Mihaila - Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -153,6 +154,7 @@ Victor Stinner Andrews Medina anatoly techtonik + Sergey Matyunin Stuart Williams Jasper Schulz Christian Hudon @@ -187,7 +189,6 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan - Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -195,7 +196,9 @@ Pieter Zieschang Gabriel Lukas Vacek + Kunal Grover Andrew Dalke + Florin Papa Sylvain Thenault Jakub Stasiak Nathan Taylor @@ -210,7 +213,6 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner - Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -222,9 +224,11 @@ Artur Lisiecki Sergey Kishchenko Ignas Mikalajunas + Alecsandru Patrascu Christoph Gerum Martin Blais Lene Wagner + Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su Toni Mattis @@ -261,6 +265,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann + florinpapa Rafał Gałczyński Matt Bogosian Christian Muirhead @@ -275,6 +280,7 @@ Boglarka Vezer Chris Pressey Buck Golemon + Diana Popa Konrad Delong Dinu Gherman Chris Lambacher From pypy.commits at gmail.com Sun Apr 17 12:26:10 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Apr 2016 09:26:10 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: A failing test Message-ID: <5713b922.0c371c0a.4fa73.ffff881d@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r83718:33ba697be3c6 Date: 2016-04-17 18:26 +0200 http://bitbucket.org/pypy/pypy/changeset/33ba697be3c6/ Log: A failing test diff --git a/pypy/module/cpyext/include/complexobject.h 
b/pypy/module/cpyext/include/complexobject.h --- a/pypy/module/cpyext/include/complexobject.h +++ b/pypy/module/cpyext/include/complexobject.h @@ -6,14 +6,16 @@ extern "C" { #endif -/* fake PyComplexObject so that code that doesn't do direct field access works */ -#define PyComplexObject PyObject - typedef struct Py_complex_t { double real; double imag; } Py_complex; +typedef struct { + PyObject_HEAD + Py_complex cval; +} PyComplexObject; + /* generated function */ PyAPI_FUNC(int) _PyComplex_AsCComplex(PyObject *, Py_complex *); PyAPI_FUNC(PyObject *) _PyComplex_FromCComplex(Py_complex *); diff --git a/pypy/module/cpyext/test/test_complexobject.py b/pypy/module/cpyext/test/test_complexobject.py --- a/pypy/module/cpyext/test/test_complexobject.py +++ b/pypy/module/cpyext/test/test_complexobject.py @@ -40,3 +40,16 @@ return PyComplex_FromCComplex(c); """)]) assert module.test() == 1.2 + 3.4j + + def test_PyComplex_to_WComplex(self): + module = self.import_extension('foo', [ + ("test", "METH_NOARGS", + """ + Py_complex c = {1.2, 3.4}; + PyObject *obj = PyObject_Malloc(sizeof(PyComplexObject)); + obj = PyObject_Init(obj, &PyComplex_Type); + assert(obj != NULL); + ((PyComplexObject *)obj)->cval = c; + return obj; + """)]) + assert module.test() == 1.2 + 3.4j From pypy.commits at gmail.com Sun Apr 17 12:35:27 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Apr 2016 09:35:27 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: Raise a SystemError when the situation described in the test happens. Message-ID: <5713bb4f.a2f2c20a.320cd.55ea@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r83719:bfdb061894ce Date: 2016-04-17 18:35 +0200 http://bitbucket.org/pypy/pypy/changeset/bfdb061894ce/ Log: Raise a SystemError when the situation described in the test happens. 
(The test itself needs to be fixed) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -1,5 +1,6 @@ import sys +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.baseobjspace import W_Root, SpaceCache from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.extregistry import ExtRegistryEntry @@ -63,7 +64,15 @@ def realize(self, space, obj): w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) - w_obj = space.allocate_instance(self.W_BaseObject, w_type) + try: + w_obj = space.allocate_instance(self.W_BaseObject, w_type) + except OperationError as e: + if e.match(space, space.w_TypeError): + raise oefmt(space.w_SystemError, + "cpyext: don't know how to make a '%N' object " + "from a PyObject", + w_type) + raise track_reference(space, obj, w_obj) return w_obj From pypy.commits at gmail.com Sun Apr 17 12:57:18 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Apr 2016 09:57:18 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: Call make_typedescr() in complexobject.py too. This fixes the existing Message-ID: <5713c06e.92371c0a.e4f71.61b3@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r83720:703a0b78f622 Date: 2016-04-17 18:57 +0200 http://bitbucket.org/pypy/pypy/changeset/703a0b78f622/ Log: Call make_typedescr() in complexobject.py too. This fixes the existing failing test in test_complexobject.py, as well as the new one added now. 
diff --git a/pypy/module/cpyext/complexobject.py b/pypy/module/cpyext/complexobject.py --- a/pypy/module/cpyext/complexobject.py +++ b/pypy/module/cpyext/complexobject.py @@ -1,16 +1,51 @@ from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.cpyext.api import ( +from pypy.module.cpyext.api import (PyObjectFields, bootstrap_function, cpython_api, cpython_struct, PyObject, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + make_typedescr, track_reference, from_ref) from pypy.module.cpyext.floatobject import PyFloat_AsDouble from pypy.objspace.std.complexobject import W_ComplexObject from pypy.interpreter.error import OperationError PyComplex_Check, PyComplex_CheckExact = build_type_checkers("Complex") -Py_complex_t = lltype.ForwardReference() +Py_complex_t = rffi.CStruct('Py_complex_t', + ('real', rffi.DOUBLE), + ('imag', rffi.DOUBLE), + hints={'size': 2 * rffi.sizeof(rffi.DOUBLE)}) Py_complex_ptr = lltype.Ptr(Py_complex_t) -Py_complex_fields = (("real", rffi.DOUBLE), ("imag", rffi.DOUBLE)) -cpython_struct("Py_complex", Py_complex_fields, Py_complex_t) + +PyComplexObjectStruct = lltype.ForwardReference() +PyComplexObject = lltype.Ptr(PyComplexObjectStruct) +PyComplexObjectFields = PyObjectFields + \ + (("cval", Py_complex_t),) +cpython_struct("PyComplexObject", PyComplexObjectFields, PyComplexObjectStruct) + + at bootstrap_function +def init_complexobject(space): + "Type description of PyComplexObject" + make_typedescr(space.w_complex.layout.typedef, + basestruct=PyComplexObject.TO, + attach=complex_attach, + realize=complex_realize) + +def complex_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyComplexObject with the given complex object. The + value must not be modified. 
+ """ + assert isinstance(w_obj, W_ComplexObject) + py_obj = rffi.cast(PyComplexObject, py_obj) + py_obj.c_cval.c_real = w_obj.realval + py_obj.c_cval.c_imag = w_obj.imagval + +def complex_realize(space, obj): + py_obj = rffi.cast(PyComplexObject, obj) + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(W_ComplexObject, w_type) + w_obj.__init__(py_obj.c_cval.c_real, py_obj.c_cval.c_imag) + track_reference(space, obj, w_obj) + return w_obj @cpython_api([lltype.Float, lltype.Float], PyObject) diff --git a/pypy/module/cpyext/test/test_complexobject.py b/pypy/module/cpyext/test/test_complexobject.py --- a/pypy/module/cpyext/test/test_complexobject.py +++ b/pypy/module/cpyext/test/test_complexobject.py @@ -53,3 +53,12 @@ return obj; """)]) assert module.test() == 1.2 + 3.4j + + def test_WComplex_to_PyComplex(self): + module = self.import_extension('foo', [ + ("test", "METH_O", + """ + Py_complex c = ((PyComplexObject *)args)->cval; + return Py_BuildValue("dd", c.real, c.imag); + """)]) + assert module.test(1.2 + 3.4j) == (1.2, 3.4) From pypy.commits at gmail.com Sun Apr 17 13:09:23 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Apr 2016 10:09:23 -0700 (PDT) Subject: [pypy-commit] cffi default: update the version number to 1.6 Message-ID: <5713c343.96811c0a.ab146.ffffa1be@mx.google.com> Author: Armin Rigo Branch: Changeset: r2665:a1edd8c65596 Date: 2016-04-17 19:09 +0200 http://bitbucket.org/cffi/cffi/changeset/a1edd8c65596/ Log: update the version number to 1.6 diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -2,7 +2,7 @@ #include #include "structmember.h" -#define CFFI_VERSION "1.5.2" +#define CFFI_VERSION "1.6.0" #ifdef MS_WIN32 #include diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -12,7 +12,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.5.2", ("This test_c.py file is 
for testing a version" +assert __version__ == "1.6.0", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.5.2" -__version_info__ = (1, 5, 2) +__version__ = "1.6.0" +__version_info__ = (1, 6, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.5.2" + "\ncompiled with cffi version: 1.6.0" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.5' +version = '1.6' # The full version, including alpha/beta/rc tags. -release = '1.5.2' +release = '1.6.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -51,11 +51,11 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-1.5.2.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-1.6.0.tar.gz - - MD5: fa766133f7299464c8bf857e0c966a82 + - MD5: ... - - SHA: 5239b3aa4f67eed3559c09778096ecd4faeca876 + - SHA: ... 
* Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -144,7 +144,7 @@ `Mailing list `_ """, - version='1.5.2', + version='1.6.0', packages=['cffi'] if cpython else [], package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h', '_embedding.h']} From pypy.commits at gmail.com Sun Apr 17 13:13:55 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Apr 2016 10:13:55 -0700 (PDT) Subject: [pypy-commit] cffi default: Mention ffi.unpack(), full doc later Message-ID: <5713c453.e8b3c20a.53533.3199@mx.google.com> Author: Armin Rigo Branch: Changeset: r2666:9971e9cc37dd Date: 2016-04-17 19:14 +0200 http://bitbucket.org/cffi/cffi/changeset/9971e9cc37dd/ Log: Mention ffi.unpack(), full doc later diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -8,6 +8,8 @@ * ffi.list_types() +* ffi.unpack() + v1.5.2 ====== From pypy.commits at gmail.com Sun Apr 17 13:15:40 2016 From: pypy.commits at gmail.com (stefanor) Date: Sun, 17 Apr 2016 10:15:40 -0700 (PDT) Subject: [pypy-commit] pypy default: That was a bug-fix to the NEON bug-fix Message-ID: <5713c4bc.08851c0a.f6e6b.5723@mx.google.com> Author: Stefano Rivera Branch: Changeset: r83721:d17aa400089d Date: 2016-04-17 13:12 -0400 http://bitbucket.org/pypy/pypy/changeset/d17aa400089d/ Log: That was a bug-fix to the NEON bug-fix diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -78,8 +78,6 @@ * Try harder to not emit NEON instructions on ARM processors without NEON support - * Support glibc < 2.16 on ARM - * Improve the rpython posix module system interaction function calls * Detect a missing class function implementation instead of calling a random From pypy.commits at gmail.com Sun Apr 17 13:15:41 2016 From: pypy.commits at gmail.com (stefanor) Date: 
Sun, 17 Apr 2016 10:15:41 -0700 (PDT) Subject: [pypy-commit] pypy default: Sandbox didn't work in 5.0 Message-ID: <5713c4bd.d51f1c0a.52599.ffffaeb9@mx.google.com> Author: Stefano Rivera Branch: Changeset: r83722:85cc15338d89 Date: 2016-04-17 13:14 -0400 http://bitbucket.org/pypy/pypy/changeset/85cc15338d89/ Log: Sandbox didn't work in 5.0 diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -90,6 +90,8 @@ * Fix JIT issue with unpack() on a Trace which contains half-written operations + * Fix sandbox startup (a regression in 5.0) + * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy From pypy.commits at gmail.com Sun Apr 17 13:16:49 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Apr 2016 10:16:49 -0700 (PDT) Subject: [pypy-commit] pypy default: update the version number of cffi to 1.6.0 and import the current Message-ID: <5713c501.e7bec20a.ac70.61e3@mx.google.com> Author: Armin Rigo Branch: Changeset: r83723:cb355ffb0a18 Date: 2016-04-17 19:16 +0200 http://bitbucket.org/pypy/pypy/changeset/cb355ffb0a18/ Log: update the version number of cffi to 1.6.0 and import the current cffi head (a1edd8c65596) diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.5.2 +Version: 1.6.0 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.5.2" -__version_info__ = (1, 5, 2) +__version__ = "1.6.0" +__version_info__ = (1, 6, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.5.2" + "\ncompiled with cffi version: 1.6.0" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -299,6 +299,23 @@ """ return self._backend.string(cdata, maxlen) + def unpack(self, cdata, length): + """Unpack an array of C data of the given length, + returning a Python string/unicode/list. + + If 'cdata' is a pointer to 'char', returns a byte string. + It does not stop at the first null. This is equivalent to: + ffi.buffer(cdata, length)[:] + + If 'cdata' is a pointer to 'wchar_t', returns a unicode string. + 'length' is measured in wchar_t's; it is not the size in bytes. + + If 'cdata' is a pointer to anything else, returns a list of + 'length' items. This is a faster equivalent to: + [cdata[i] for i in range(length)] + """ + return self._backend.unpack(cdata, length) + def buffer(self, cdata, size=-1): """Return a read-write buffer object that references the raw C data pointed to by the given 'cdata'. 
The 'cdata' must be a pointer or @@ -721,6 +738,26 @@ raise ValueError("ffi.def_extern() is only available on API-mode FFI " "objects") + def list_types(self): + """Returns the user type names known to this FFI instance. + This returns a tuple containing three lists of names: + (typedef_names, names_of_structs, names_of_unions) + """ + typedefs = [] + structs = [] + unions = [] + for key in self._parser._declarations: + if key.startswith('typedef '): + typedefs.append(key[8:]) + elif key.startswith('struct '): + structs.append(key[7:]) + elif key.startswith('union '): + unions.append(key[6:]) + typedefs.sort() + structs.sort() + unions.sort() + return (typedefs, structs, unions) + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -1231,7 +1231,7 @@ if c == '\n': return '\\n' return '\\%03o' % ord(c) lines = [] - for line in s.splitlines(True): + for line in s.splitlines(True) or ['']: lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) return ' \\\n'.join(lines) @@ -1319,7 +1319,9 @@ s = s.encode('ascii') super(NativeIO, self).write(s) -def _make_c_or_py_source(ffi, module_name, preamble, target_file): +def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): + if verbose: + print("generating %s" % (target_file,)) recompiler = Recompiler(ffi, module_name, target_is_python=(preamble is None)) recompiler.collect_type_table() @@ -1331,6 +1333,8 @@ with open(target_file, 'r') as f1: if f1.read(len(output) + 1) != output: raise IOError + if verbose: + print("(already up-to-date)") return False # already up-to-date except IOError: tmp_file = '%s.~%d' % (target_file, os.getpid()) @@ -1343,12 +1347,14 @@ os.rename(tmp_file, target_file) return True -def make_c_source(ffi, module_name, preamble, target_c_file): +def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): assert 
preamble is not None - return _make_c_or_py_source(ffi, module_name, preamble, target_c_file) + return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, + verbose) -def make_py_source(ffi, module_name, target_py_file): - return _make_c_or_py_source(ffi, module_name, None, target_py_file) +def make_py_source(ffi, module_name, target_py_file, verbose=False): + return _make_c_or_py_source(ffi, module_name, None, target_py_file, + verbose) def _modname_to_file(outputdir, modname, extension): parts = modname.split('.') @@ -1438,7 +1444,8 @@ target = '*' # ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) - updated = make_c_source(ffi, module_name, preamble, c_file) + updated = make_c_source(ffi, module_name, preamble, c_file, + verbose=compiler_verbose) if call_c_compiler: patchlist = [] cwd = os.getcwd() @@ -1458,7 +1465,8 @@ else: if c_file is None: c_file, _ = _modname_to_file(tmpdir, module_name, '.py') - updated = make_py_source(ffi, module_name, c_file) + updated = make_py_source(ffi, module_name, c_file, + verbose=compiler_verbose) if call_c_compiler: return c_file else: @@ -1484,4 +1492,7 @@ def typeof_disabled(*args, **kwds): raise NotImplementedError ffi._typeof = typeof_disabled + for name in dir(ffi): + if not name.startswith('_') and not hasattr(module.ffi, name): + setattr(ffi, name, NotImplemented) return module.lib diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.5.2" +VERSION = "1.6.0" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ 
# ____________________________________________________________ import sys -assert __version__ == "1.5.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.6.0", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py @@ -424,3 +424,59 @@ def test_ffi_def_extern(self): ffi = FFI() py.test.raises(ValueError, ffi.def_extern) + + def test_introspect_typedef(self): + ffi = FFI() + ffi.cdef("typedef int foo_t;") + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'primitive' + assert ffi.typeof('foo_t').cname == 'int' + # + ffi.cdef("typedef signed char a_t, c_t, g_t, b_t;") + assert ffi.list_types() == (['a_t', 'b_t', 'c_t', 'foo_t', 'g_t'], + [], []) + + def test_introspect_struct(self): + ffi = FFI() + ffi.cdef("struct foo_s { int a; };") + assert ffi.list_types() == ([], ['foo_s'], []) + assert ffi.typeof('struct foo_s').kind == 'struct' + assert ffi.typeof('struct foo_s').cname == 'struct foo_s' + + def test_introspect_union(self): + ffi = FFI() + ffi.cdef("union foo_s { int a; };") + assert ffi.list_types() == ([], [], ['foo_s']) + assert ffi.typeof('union foo_s').kind == 'union' + assert ffi.typeof('union foo_s').cname == 'union foo_s' + + def test_introspect_struct_and_typedef(self): + ffi = FFI() + ffi.cdef("typedef struct { int a; } foo_t;") + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'struct' + assert ffi.typeof('foo_t').cname == 'foo_t' + + def test_introspect_included_type(self): + ffi1 = FFI() + ffi2 = FFI() + ffi1.cdef("typedef signed char schar_t; struct sint_t { int x; };") + 
ffi2.include(ffi1) + assert ffi1.list_types() == ffi2.list_types() == ( + ['schar_t'], ['sint_t'], []) + + def test_introspect_order(self): + ffi = FFI() + ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;") + ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;") + ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;") + assert ffi.list_types() == (['b', 'bb', 'bbb'], + ['a', 'cc', 'ccc'], + ['aa', 'aaa', 'g']) + + def test_unpack(self): + ffi = FFI() + p = ffi.new("char[]", b"abc\x00def") + assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" + p = ffi.new("int[]", [-123456789]) + assert ffi.unpack(p, 1) == [-123456789] diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -496,3 +496,10 @@ assert i < 20 time.sleep(0.51) assert seen == ['init!', 'oops'] * 3 + +def test_unpack(): + ffi = _cffi1_backend.FFI() + p = ffi.new("char[]", b"abc\x00def") + assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" + p = ffi.new("int[]", [-123456789]) + assert ffi.unpack(p, 1) == [-123456789] diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1744,3 +1744,125 @@ lib.mycb1 = lib.foo assert lib.mycb1(200) == 242 assert lib.indirect_call(300) == 342 + +def test_introspect_function(): + ffi = FFI() + ffi.cdef("float f1(double);") + lib = verify(ffi, 'test_introspect_function', """ + float f1(double x) { return x; } + """) + assert dir(lib) == ['f1'] + FUNC = ffi.typeof(lib.f1) + assert FUNC.kind == 'function' + assert FUNC.args[0].cname == 'double' + assert FUNC.result.cname == 'float' + assert ffi.typeof(ffi.addressof(lib, 
'f1')) is FUNC + +def test_introspect_global_var(): + ffi = FFI() + ffi.cdef("float g1;") + lib = verify(ffi, 'test_introspect_global_var', """ + float g1; + """) + assert dir(lib) == ['g1'] + FLOATPTR = ffi.typeof(ffi.addressof(lib, 'g1')) + assert FLOATPTR.kind == 'pointer' + assert FLOATPTR.item.cname == 'float' + +def test_introspect_global_var_array(): + ffi = FFI() + ffi.cdef("float g1[100];") + lib = verify(ffi, 'test_introspect_global_var_array', """ + float g1[100]; + """) + assert dir(lib) == ['g1'] + FLOATARRAYPTR = ffi.typeof(ffi.addressof(lib, 'g1')) + assert FLOATARRAYPTR.kind == 'pointer' + assert FLOATARRAYPTR.item.kind == 'array' + assert FLOATARRAYPTR.item.length == 100 + assert ffi.typeof(lib.g1) is FLOATARRAYPTR.item + +def test_introspect_integer_const(): + ffi = FFI() + ffi.cdef("#define FOO 42") + lib = verify(ffi, 'test_introspect_integer_const', """ + #define FOO 42 + """) + assert dir(lib) == ['FOO'] + assert lib.FOO == ffi.integer_const('FOO') == 42 + +def test_introspect_typedef(): + ffi = FFI() + ffi.cdef("typedef int foo_t;") + lib = verify(ffi, 'test_introspect_typedef', """ + typedef int foo_t; + """) + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'primitive' + assert ffi.typeof('foo_t').cname == 'int' + +def test_introspect_typedef_multiple(): + ffi = FFI() + ffi.cdef("typedef signed char a_t, c_t, g_t, b_t;") + lib = verify(ffi, 'test_introspect_typedef_multiple', """ + typedef signed char a_t, c_t, g_t, b_t; + """) + assert ffi.list_types() == (['a_t', 'b_t', 'c_t', 'g_t'], [], []) + +def test_introspect_struct(): + ffi = FFI() + ffi.cdef("struct foo_s { int a; };") + lib = verify(ffi, 'test_introspect_struct', """ + struct foo_s { int a; }; + """) + assert ffi.list_types() == ([], ['foo_s'], []) + assert ffi.typeof('struct foo_s').kind == 'struct' + assert ffi.typeof('struct foo_s').cname == 'struct foo_s' + +def test_introspect_union(): + ffi = FFI() + ffi.cdef("union foo_s { int a; };") + 
lib = verify(ffi, 'test_introspect_union', """ + union foo_s { int a; }; + """) + assert ffi.list_types() == ([], [], ['foo_s']) + assert ffi.typeof('union foo_s').kind == 'union' + assert ffi.typeof('union foo_s').cname == 'union foo_s' + +def test_introspect_struct_and_typedef(): + ffi = FFI() + ffi.cdef("typedef struct { int a; } foo_t;") + lib = verify(ffi, 'test_introspect_struct_and_typedef', """ + typedef struct { int a; } foo_t; + """) + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'struct' + assert ffi.typeof('foo_t').cname == 'foo_t' + +def test_introspect_included_type(): + SOURCE = """ + typedef signed char schar_t; + struct sint_t { int x; }; + """ + ffi1 = FFI() + ffi1.cdef(SOURCE) + ffi2 = FFI() + ffi2.include(ffi1) + verify(ffi1, "test_introspect_included_type_parent", SOURCE) + verify(ffi2, "test_introspect_included_type", SOURCE) + assert ffi1.list_types() == ffi2.list_types() == ( + ['schar_t'], ['sint_t'], []) + +def test_introspect_order(): + ffi = FFI() + ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;") + ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;") + ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;") + verify(ffi, "test_introspect_order", """ + union aaa { int a; }; typedef struct ccc { int a; } b; + union g { int a; }; typedef struct cc { int a; } bbb; + union aa { int a; }; typedef struct a { int a; } bb; + """) + assert ffi.list_types() == (['b', 'bb', 'bbb'], + ['a', 'cc', 'ccc'], + ['aa', 'aaa', 'g']) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -695,25 +695,14 @@ assert ffi.string(ffi.cast('enum ee', 11)) == "EE2" assert ffi.string(ffi.cast('enum ee', -10)) == "EE3" # - # try again - ffi.verify("enum ee { EE1=10, EE2, EE3=-10, 
EE4 };") - assert ffi.string(ffi.cast('enum ee', 11)) == "EE2" - # assert ffi.typeof("enum ee").relements == {'EE1': 10, 'EE2': 11, 'EE3': -10} assert ffi.typeof("enum ee").elements == {10: 'EE1', 11: 'EE2', -10: 'EE3'} def test_full_enum(): ffi = FFI() ffi.cdef("enum ee { EE1, EE2, EE3 };") - ffi.verify("enum ee { EE1, EE2, EE3 };") - py.test.raises(VerificationError, ffi.verify, "enum ee { EE1, EE2 };") - # disabled: for now, we always accept and fix transparently constant values - #e = py.test.raises(VerificationError, ffi.verify, - # "enum ee { EE1, EE3, EE2 };") - #assert str(e.value) == 'enum ee: EE2 has the real value 2, not 1' - # extra items cannot be seen and have no bad consequence anyway - lib = ffi.verify("enum ee { EE1, EE2, EE3, EE4 };") - assert lib.EE3 == 2 + lib = ffi.verify("enum ee { EE1, EE2, EE3 };") + assert [lib.EE1, lib.EE2, lib.EE3] == [0, 1, 2] def test_enum_usage(): ffi = FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py @@ -143,7 +143,7 @@ env_extra[envname] = libpath for key, value in sorted(env_extra.items()): if os.environ.get(key) != value: - print '* setting env var %r to %r' % (key, value) + print('* setting env var %r to %r' % (key, value)) os.environ[key] = value def execute(self, name): @@ -165,6 +165,9 @@ class TestBasic(EmbeddingTests): + def test_empty(self): + empty_cffi = self.prepare_module('empty') + def test_basic(self): add1_cffi = self.prepare_module('add1') self.compile('add1-test', [add1_cffi]) From pypy.commits at gmail.com Sun Apr 17 13:39:43 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Apr 2016 10:39:43 -0700 (PDT) Subject: [pypy-commit] pypy default: was missing: ffi.list_types() Message-ID: <5713ca5f.2a18c20a.d2685.349c@mx.google.com> Author: Armin Rigo Branch: Changeset: 
r83724:b1514552eaec Date: 2016-04-17 19:39 +0200 http://bitbucket.org/pypy/pypy/changeset/b1514552eaec/ Log: was missing: ffi.list_types() diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -630,6 +630,38 @@ return w_result + def descr_list_types(self): + """\ +Returns the user type names known to this FFI instance. +This returns a tuple containing three lists of names: +(typedef_names, names_of_structs, names_of_unions)""" + # + space = self.space + ctx = self.ctxobj.ctx + + lst1_w = [] + for i in range(rffi.getintfield(ctx, 'c_num_typenames')): + s = rffi.charp2str(ctx.c_typenames[i].c_name) + lst1_w.append(space.wrap(s)) + + lst2_w = [] + lst3_w = [] + for i in range(rffi.getintfield(ctx, 'c_num_struct_unions')): + su = ctx.c_struct_unions[i] + if su.c_name[0] == '$': + continue + s = rffi.charp2str(su.c_name) + if rffi.getintfield(su, 'c_flags') & cffi_opcode.F_UNION: + lst_w = lst3_w + else: + lst_w = lst2_w + lst_w.append(space.wrap(s)) + + return space.newtuple([space.newlist(lst1_w), + space.newlist(lst2_w), + space.newlist(lst3_w)]) + + def descr_init_once(self, w_func, w_tag): """\ init_once(function, tag): run function() once. 
More precisely, @@ -750,6 +782,7 @@ getctype = interp2app(W_FFIObject.descr_getctype), init_once = interp2app(W_FFIObject.descr_init_once), integer_const = interp2app(W_FFIObject.descr_integer_const), + list_types = interp2app(W_FFIObject.descr_list_types), memmove = interp2app(W_FFIObject.descr_memmove), new = interp2app(W_FFIObject.descr_new), new_allocator = interp2app(W_FFIObject.descr_new_allocator), diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -1626,3 +1626,150 @@ # a case where 'onerror' is not callable raises(TypeError, ffi.def_extern(name='bar', onerror=42), lambda x: x) + + def test_extern_python_stdcall(self): + ffi, lib = self.prepare(""" + extern "Python" int __stdcall foo(int); + extern "Python" int WINAPI bar(int); + int (__stdcall * mycb1)(int); + int indirect_call(int); + """, 'test_extern_python_stdcall', """ + #ifndef _MSC_VER + # define __stdcall + #endif + static int (__stdcall * mycb1)(int); + static int indirect_call(int x) { + return mycb1(x); + } + """) + # + @ffi.def_extern() + def foo(x): + return x + 42 + @ffi.def_extern() + def bar(x): + return x + 43 + assert lib.foo(100) == 142 + assert lib.bar(100) == 143 + lib.mycb1 = lib.foo + assert lib.mycb1(200) == 242 + assert lib.indirect_call(300) == 342 + + def test_introspect_function(self): + ffi, lib = self.prepare(""" + float f1(double); + """, 'test_introspect_function', """ + float f1(double x) { return x; } + """) + assert dir(lib) == ['f1'] + FUNC = ffi.typeof(lib.f1) + assert FUNC.kind == 'function' + assert FUNC.args[0].cname == 'double' + assert FUNC.result.cname == 'float' + assert ffi.typeof(ffi.addressof(lib, 'f1')) is FUNC + + def test_introspect_global_var(self): + ffi, lib = self.prepare(""" + float g1; + """, 'test_introspect_global_var', """ + float g1; + """) + assert dir(lib) == ['g1'] + 
FLOATPTR = ffi.typeof(ffi.addressof(lib, 'g1')) + assert FLOATPTR.kind == 'pointer' + assert FLOATPTR.item.cname == 'float' + + def test_introspect_global_var_array(self): + ffi, lib = self.prepare(""" + float g1[100]; + """, 'test_introspect_global_var_array', """ + float g1[100]; + """) + assert dir(lib) == ['g1'] + FLOATARRAYPTR = ffi.typeof(ffi.addressof(lib, 'g1')) + assert FLOATARRAYPTR.kind == 'pointer' + assert FLOATARRAYPTR.item.kind == 'array' + assert FLOATARRAYPTR.item.length == 100 + assert ffi.typeof(lib.g1) is FLOATARRAYPTR.item + + def test_introspect_integer_const(self): + ffi, lib = self.prepare("#define FOO 42", + 'test_introspect_integer_const', """ + #define FOO 42 + """) + assert dir(lib) == ['FOO'] + assert lib.FOO == ffi.integer_const('FOO') == 42 + + def test_introspect_typedef(self): + ffi, lib = self.prepare("typedef int foo_t;", + 'test_introspect_typedef', """ + typedef int foo_t; + """) + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'primitive' + assert ffi.typeof('foo_t').cname == 'int' + + def test_introspect_typedef_multiple(self): + ffi, lib = self.prepare(""" + typedef signed char a_t, c_t, g_t, b_t; + """, 'test_introspect_typedef_multiple', """ + typedef signed char a_t, c_t, g_t, b_t; + """) + assert ffi.list_types() == (['a_t', 'b_t', 'c_t', 'g_t'], [], []) + + def test_introspect_struct(self): + ffi, lib = self.prepare(""" + struct foo_s { int a; }; + """, 'test_introspect_struct', """ + struct foo_s { int a; }; + """) + assert ffi.list_types() == ([], ['foo_s'], []) + assert ffi.typeof('struct foo_s').kind == 'struct' + assert ffi.typeof('struct foo_s').cname == 'struct foo_s' + + def test_introspect_union(self): + ffi, lib = self.prepare(""" + union foo_s { int a; }; + """, 'test_introspect_union', """ + union foo_s { int a; }; + """) + assert ffi.list_types() == ([], [], ['foo_s']) + assert ffi.typeof('union foo_s').kind == 'union' + assert ffi.typeof('union foo_s').cname == 'union 
foo_s' + + def test_introspect_struct_and_typedef(self): + ffi, lib = self.prepare(""" + typedef struct { int a; } foo_t; + """, 'test_introspect_struct_and_typedef', """ + typedef struct { int a; } foo_t; + """) + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'struct' + assert ffi.typeof('foo_t').cname == 'foo_t' + + def test_introspect_included_type(self): + SOURCE = """ + typedef signed char schar_t; + struct sint_t { int x; }; + """ + ffi1, lib1 = self.prepare(SOURCE, + "test_introspect_included_type_parent", SOURCE) + ffi2, lib2 = self.prepare("", + "test_introspect_included_type", SOURCE, + includes=[ffi1]) + assert ffi1.list_types() == ffi2.list_types() == ( + ['schar_t'], ['sint_t'], []) + + def test_introspect_order(self): + ffi, lib = self.prepare(""" + union aaa { int a; }; typedef struct ccc { int a; } b; + union g { int a; }; typedef struct cc { int a; } bbb; + union aa { int a; }; typedef struct a { int a; } bb; + """, "test_introspect_order", """ + union aaa { int a; }; typedef struct ccc { int a; } b; + union g { int a; }; typedef struct cc { int a; } bbb; + union aa { int a; }; typedef struct a { int a; } bb; + """) + assert ffi.list_types() == (['b', 'bb', 'bbb'], + ['a', 'cc', 'ccc'], + ['aa', 'aaa', 'g']) From pypy.commits at gmail.com Sun Apr 17 20:52:36 2016 From: pypy.commits at gmail.com (pjenvey) Date: Sun, 17 Apr 2016 17:52:36 -0700 (PDT) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <57142fd4.0113c20a.8b306.ffffcd9d@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83725:df440f20d566 Date: 2016-04-17 17:51 -0700 http://bitbucket.org/pypy/pypy/changeset/df440f20d566/ Log: merge default diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -111,23 +111,24 @@ Simon Burton Martin Matusiak Konstantin Lopuhin + Stefano Rivera Wenzhu Man John Witulski Laurence Tratt Ivan Sichmann Freitas Greg Price Dario Bertini - Stefano Rivera Mark Pearse Simon Cross + Edd Barrett Andreas 
Stührk - Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Spenser Bauman Jeremy Thurgood Paweł Piotr Przeradowski - Spenser Bauman + Tobias Pape Paul deGrandis Ilya Osadchiy marky1991 @@ -139,7 +140,7 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Tobias Pape + Mark Young Wanja Saatkamp Gerald Klix Mike Blume @@ -170,9 +171,9 @@ Yichao Yu Rocco Moretti Gintautas Miliauskas + Devin Jeanpierre Michael Twomey Lucian Branescu Mihaila - Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -183,6 +184,7 @@ Victor Stinner Andrews Medina anatoly techtonik + Sergey Matyunin Stuart Williams Jasper Schulz Christian Hudon @@ -217,7 +219,6 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan - Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -225,7 +226,9 @@ Pieter Zieschang Gabriel Lukas Vacek + Kunal Grover Andrew Dalke + Florin Papa Sylvain Thenault Jakub Stasiak Nathan Taylor @@ -240,7 +243,6 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner - Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -252,9 +254,11 @@ Artur Lisiecki Sergey Kishchenko Ignas Mikalajunas + Alecsandru Patrascu Christoph Gerum Martin Blais Lene Wagner + Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su Toni Mattis @@ -291,6 +295,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann + florinpapa Rafał Gałczyński Matt Bogosian Christian Muirhead @@ -305,6 +310,7 @@ Boglarka Vezer Chris Pressey Buck Golemon + Diana Popa Konrad Delong Dinu Gherman Chris Lambacher diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.5.2 +Version: 1.6.0 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.5.2" -__version_info__ = (1, 5, 2) +__version__ = "1.6.0" +__version_info__ = (1, 6, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.5.2" + "\ncompiled with cffi version: 1.6.0" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -299,6 +299,23 @@ """ return self._backend.string(cdata, maxlen) + def unpack(self, cdata, length): + """Unpack an array of C data of the given length, + returning a Python string/unicode/list. + + If 'cdata' is a pointer to 'char', returns a byte string. + It does not stop at the first null. This is equivalent to: + ffi.buffer(cdata, length)[:] + + If 'cdata' is a pointer to 'wchar_t', returns a unicode string. + 'length' is measured in wchar_t's; it is not the size in bytes. + + If 'cdata' is a pointer to anything else, returns a list of + 'length' items. This is a faster equivalent to: + [cdata[i] for i in range(length)] + """ + return self._backend.unpack(cdata, length) + def buffer(self, cdata, size=-1): """Return a read-write buffer object that references the raw C data pointed to by the given 'cdata'. 
The 'cdata' must be a pointer or @@ -721,6 +738,26 @@ raise ValueError("ffi.def_extern() is only available on API-mode FFI " "objects") + def list_types(self): + """Returns the user type names known to this FFI instance. + This returns a tuple containing three lists of names: + (typedef_names, names_of_structs, names_of_unions) + """ + typedefs = [] + structs = [] + unions = [] + for key in self._parser._declarations: + if key.startswith('typedef '): + typedefs.append(key[8:]) + elif key.startswith('struct '): + structs.append(key[7:]) + elif key.startswith('union '): + unions.append(key[6:]) + typedefs.sort() + structs.sort() + unions.sort() + return (typedefs, structs, unions) + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -1231,7 +1231,7 @@ if c == '\n': return '\\n' return '\\%03o' % ord(c) lines = [] - for line in s.splitlines(True): + for line in s.splitlines(True) or ['']: lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) return ' \\\n'.join(lines) @@ -1319,7 +1319,9 @@ s = s.encode('ascii') super(NativeIO, self).write(s) -def _make_c_or_py_source(ffi, module_name, preamble, target_file): +def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): + if verbose: + print("generating %s" % (target_file,)) recompiler = Recompiler(ffi, module_name, target_is_python=(preamble is None)) recompiler.collect_type_table() @@ -1331,6 +1333,8 @@ with open(target_file, 'r') as f1: if f1.read(len(output) + 1) != output: raise IOError + if verbose: + print("(already up-to-date)") return False # already up-to-date except IOError: tmp_file = '%s.~%d' % (target_file, os.getpid()) @@ -1343,12 +1347,14 @@ os.rename(tmp_file, target_file) return True -def make_c_source(ffi, module_name, preamble, target_c_file): +def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): assert 
preamble is not None - return _make_c_or_py_source(ffi, module_name, preamble, target_c_file) + return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, + verbose) -def make_py_source(ffi, module_name, target_py_file): - return _make_c_or_py_source(ffi, module_name, None, target_py_file) +def make_py_source(ffi, module_name, target_py_file, verbose=False): + return _make_c_or_py_source(ffi, module_name, None, target_py_file, + verbose) def _modname_to_file(outputdir, modname, extension): parts = modname.split('.') @@ -1438,7 +1444,8 @@ target = '*' # ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) - updated = make_c_source(ffi, module_name, preamble, c_file) + updated = make_c_source(ffi, module_name, preamble, c_file, + verbose=compiler_verbose) if call_c_compiler: patchlist = [] cwd = os.getcwd() @@ -1458,7 +1465,8 @@ else: if c_file is None: c_file, _ = _modname_to_file(tmpdir, module_name, '.py') - updated = make_py_source(ffi, module_name, c_file) + updated = make_py_source(ffi, module_name, c_file, + verbose=compiler_verbose) if call_c_compiler: return c_file else: @@ -1484,4 +1492,7 @@ def typeof_disabled(*args, **kwds): raise NotImplementedError ffi._typeof = typeof_disabled + for name in dir(ffi): + if not name.startswith('_') and not hasattr(module.ffi, name): + setattr(ffi, name, NotImplemented) return module.lib diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -81,13 +81,13 @@ Simon Burton Martin Matusiak Konstantin Lopuhin + Stefano Rivera Wenzhu Man John Witulski Laurence Tratt Ivan Sichmann Freitas Greg Price Dario Bertini - Stefano Rivera Mark Pearse Simon Cross Andreas Stührk @@ -95,9 +95,10 @@ Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov + Spenser Bauman Jeremy Thurgood Paweł Piotr Przeradowski - Spenser Bauman + Tobias Pape Paul deGrandis Ilya Osadchiy marky1991 @@ -109,7 +110,7 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Tobias Pape + Mark Young Wanja Saatkamp Gerald Klix Mike Blume @@ -140,9 +141,9 @@ Yichao Yu Rocco Moretti Gintautas Miliauskas + Devin Jeanpierre Michael Twomey Lucian Branescu Mihaila - Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -153,6 +154,7 @@ Victor Stinner Andrews Medina anatoly techtonik + Sergey Matyunin Stuart Williams Jasper Schulz Christian Hudon @@ -187,7 +189,6 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan - Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -195,7 +196,9 @@ Pieter Zieschang Gabriel Lukas Vacek + Kunal Grover Andrew Dalke + Florin Papa Sylvain Thenault Jakub Stasiak Nathan Taylor @@ -210,7 +213,6 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner - Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -222,9 +224,11 @@ Artur Lisiecki Sergey Kishchenko Ignas Mikalajunas + Alecsandru Patrascu Christoph Gerum Martin Blais Lene Wagner + Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su Toni Mattis @@ -261,6 +265,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann + florinpapa Rafał Gałczyński Matt Bogosian Christian Muirhead @@ -275,6 +280,7 @@ Boglarka Vezer Chris Pressey Buck Golemon + Diana Popa Konrad Delong Dinu Gherman Chris Lambacher diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -78,8 +78,6 @@ * Try harder to not emit NEON instructions on ARM processors without NEON support - * Support glibc < 2.16 on ARM - * Improve the rpython posix module system interaction function calls * Detect a missing class function implementation instead of calling a random @@ -92,6 +90,8 @@ * Fix JIT issue with unpack() on a Trace which contains half-written operations + * Fix sandbox 
startup (a regression in 5.0) + * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,3 +5,12 @@ .. this is a revision shortly after release-5.1 .. startrev: 2180e1eaf6f6 +.. branch: rposix-for-3 + +Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). +This updates the underlying rpython functions with the ones needed for the +py3k branch + +.. branch: numpy_broadcast + +Add broadcast to micronumpy diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1051,6 +1051,9 @@ def newlist_int(self, list_i): return self.newlist([self.wrap(i) for i in list_i]) + def newlist_float(self, list_f): + return self.newlist([self.wrap(f) for f in list_f]) + def newlist_hint(self, sizehint): from pypy.objspace.std.listobject import make_empty_list_with_size return make_empty_list_with_size(self, sizehint) diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.5.2" +VERSION = "1.6.0" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: @@ -48,7 +48,7 @@ 'from_buffer': 'func.from_buffer', 'string': 'func.string', - 'rawstring': 'func.rawstring', + 'unpack': 'func.unpack', 'buffer': 'cbuffer.buffer', 'memmove': 'func.memmove', diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -323,14 +323,18 @@ from pypy.module._cffi_backend import 
ctypearray ctype = self.ctype if isinstance(ctype, ctypearray.W_CTypeArray): - return ctype.ctitem.unpack_list_of_int_items(self) + length = self.get_array_length() + with self as ptr: + return ctype.ctitem.unpack_list_of_int_items(ptr, length) return None def unpackiterable_float(self, space): from pypy.module._cffi_backend import ctypearray ctype = self.ctype if isinstance(ctype, ctypearray.W_CTypeArray): - return ctype.ctitem.unpack_list_of_float_items(self) + length = self.get_array_length() + with self as ptr: + return ctype.ctitem.unpack_list_of_float_items(ptr, length) return None @specialize.argtype(1) @@ -367,6 +371,25 @@ with self as ptr: return W_CDataGCP(self.space, ptr, self.ctype, self, w_destructor) + def unpack(self, length): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + space = self.space + if not self.ctype.is_nonfunc_pointer_or_array: + raise oefmt(space.w_TypeError, + "expected a pointer or array, got '%s'", + self.ctype.name) + if length < 0: + raise oefmt(space.w_ValueError, "'length' cannot be negative") + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + with self as ptr: + if not ptr: + raise oefmt(space.w_RuntimeError, + "cannot use unpack() on %s", + space.str_w(self.repr())) + w_result = ctype.ctitem.unpack_ptr(ctype, ptr, length) + return w_result + class W_CDataMem(W_CData): """This is used only by the results of cffi.cast('int', x) diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -109,21 +109,6 @@ def typeoffsetof_index(self, index): return self.ctptr.typeoffsetof_index(index) - def rawstring(self, w_cdata): - if isinstance(self.ctitem, ctypeprim.W_CTypePrimitive): - space = self.space - length = w_cdata.get_array_length() - if self.ctitem.size == rffi.sizeof(lltype.Char): - with w_cdata as ptr: - s = rffi.charpsize2str(ptr, length) - return 
space.wrapbytes(s) - elif self.is_unichar_ptr_or_array(): - with w_cdata as ptr: - cdata = rffi.cast(rffi.CWCHARP, ptr) - u = rffi.wcharpsize2unicode(cdata, length) - return space.wrap(u) - return W_CTypePtrOrArray.rawstring(self, w_cdata) - class W_CDataIter(W_Root): _immutable_fields_ = ['ctitem', 'cdata', '_stop'] # but not '_next' diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -49,10 +49,10 @@ def is_unichar_ptr_or_array(self): return False - def unpack_list_of_int_items(self, cdata): + def unpack_list_of_int_items(self, ptr, length): return None - def unpack_list_of_float_items(self, cdata): + def unpack_list_of_float_items(self, ptr, length): return None def pack_list_of_items(self, cdata, w_ob): @@ -127,11 +127,20 @@ raise oefmt(space.w_TypeError, "string(): unexpected cdata '%s' argument", self.name) - def rawstring(self, cdataobj): + def unpack_ptr(self, w_ctypeptr, ptr, length): + # generic implementation, when the type of items is not known to + # be one for which a fast-case exists space = self.space - raise oefmt(space.w_TypeError, - "expected a 'char[]' or 'uint8_t[]' or 'int8_t[]' " - "or 'wchar_t[]', got '%s'", self.name) + itemsize = self.size + if itemsize < 0: + raise oefmt(space.w_ValueError, + "'%s' points to items of unknown size", + w_ctypeptr.name) + result_w = [None] * length + for i in range(length): + result_w[i] = self.convert_to_object(ptr) + ptr = rffi.ptradd(ptr, itemsize) + return space.newlist(result_w) def add(self, cdata, i): space = self.space diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -87,6 +87,13 @@ return self.space.wrapbytes(s) return W_CType.string(self, cdataobj, maxlen) + def unpack_ptr(self, w_ctypeptr, ptr, length): + result = 
self.unpack_list_of_int_items(ptr, length) + if result is not None: + return self.space.newlist_int(result) + return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length) + + class W_CTypePrimitiveCharOrUniChar(W_CTypePrimitive): _attrs_ = [] is_primitive_integer = True @@ -125,6 +132,10 @@ value = self._convert_to_char(w_ob) cdata[0] = value + def unpack_ptr(self, w_ctypeptr, ptr, length): + s = rffi.charpsize2str(ptr, length) + return self.space.wrapbytes(s) + # XXX explicitly use an integer type instead of lltype.UniChar here, # because for now the latter is defined as unsigned by RPython (even @@ -171,6 +182,10 @@ value = self._convert_to_unichar(w_ob) rffi.cast(rffi.CWCHARP, cdata)[0] = value + def unpack_ptr(self, w_ctypeptr, ptr, length): + u = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, ptr), length) + return self.space.wrap(u) + class W_CTypePrimitiveSigned(W_CTypePrimitive): _attrs_ = ['value_fits_long', 'value_smaller_than_long'] @@ -221,19 +236,16 @@ def write_raw_integer_data(self, w_cdata, value): w_cdata.write_raw_signed_data(value) - def unpack_list_of_int_items(self, w_cdata): + def unpack_list_of_int_items(self, ptr, length): if self.size == rffi.sizeof(rffi.LONG): from rpython.rlib.rrawarray import populate_list_from_raw_array res = [] - length = w_cdata.get_array_length() - with w_cdata as ptr: - buf = rffi.cast(rffi.LONGP, ptr) - populate_list_from_raw_array(res, buf, length) + buf = rffi.cast(rffi.LONGP, ptr) + populate_list_from_raw_array(res, buf, length) return res elif self.value_smaller_than_long: - res = [0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_list_from_raw_array(res, ptr, self.size) + res = [0] * length + misc.unpack_list_from_raw_array(res, ptr, self.size) return res return None @@ -313,11 +325,10 @@ def write_raw_integer_data(self, w_cdata, value): w_cdata.write_raw_unsigned_data(value) - def unpack_list_of_int_items(self, w_cdata): + def unpack_list_of_int_items(self, ptr, length): if 
self.value_fits_long: - res = [0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_unsigned_list_from_raw_array(res, ptr, self.size) + res = [0] * length + misc.unpack_unsigned_list_from_raw_array(res, ptr, self.size) return res return None @@ -391,19 +402,16 @@ value = space.float_w(space.float(w_ob)) misc.write_raw_float_data(cdata, value, self.size) - def unpack_list_of_float_items(self, w_cdata): + def unpack_list_of_float_items(self, ptr, length): if self.size == rffi.sizeof(rffi.DOUBLE): from rpython.rlib.rrawarray import populate_list_from_raw_array res = [] - length = w_cdata.get_array_length() - with w_cdata as ptr: - buf = rffi.cast(rffi.DOUBLEP, ptr) - populate_list_from_raw_array(res, buf, length) + buf = rffi.cast(rffi.DOUBLEP, ptr) + populate_list_from_raw_array(res, buf, length) return res elif self.size == rffi.sizeof(rffi.FLOAT): - res = [0.0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_cfloat_list_from_raw_array(res, ptr) + res = [0.0] * length + misc.unpack_cfloat_list_from_raw_array(res, ptr) return res return None @@ -421,6 +429,12 @@ return True return W_CTypePrimitive.pack_list_of_items(self, cdata, w_ob) + def unpack_ptr(self, w_ctypeptr, ptr, length): + result = self.unpack_list_of_float_items(ptr, length) + if result is not None: + return self.space.newlist_float(result) + return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length) + class W_CTypePrimitiveLongDouble(W_CTypePrimitiveFloat): _attrs_ = [] diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -542,19 +542,23 @@ return w_cdata.ctype.string(w_cdata, maxlen) - @unwrap_spec(w_cdata=W_CData) - def descr_rawstring(self, w_cdata): - """\ -Convert a cdata that is an array of 'char' or 'wchar_t' to -a byte or unicode string. Unlike ffi.string(), it does not stop -at the first null. 
+ @unwrap_spec(w_cdata=W_CData, length=int) + def descr_unpack(self, w_cdata, length): + """Unpack an array of C data of the given length, +returning a Python string/unicode/list. -Note that if you have a pointer and an explicit length, you -can use 'p[0:length]' to make an array view. This is similar to -the construct 'list(p[0:length])', which returns a list of chars/ -unichars/ints/floats.""" +If 'cdata' is a pointer to 'char', returns a byte string. +It does not stop at the first null. This is equivalent to: +ffi.buffer(cdata, length)[:] + +If 'cdata' is a pointer to 'wchar_t', returns a unicode string. +'length' is measured in wchar_t's; it is not the size in bytes. + +If 'cdata' is a pointer to anything else, returns a list of +'length' items. This is a faster equivalent to: +[cdata[i] for i in range(length)]""" # - return w_cdata.ctype.rawstring(w_cdata) + return w_cdata.unpack(length) def descr_sizeof(self, w_arg): @@ -626,6 +630,38 @@ return w_result + def descr_list_types(self): + """\ +Returns the user type names known to this FFI instance. +This returns a tuple containing three lists of names: +(typedef_names, names_of_structs, names_of_unions)""" + # + space = self.space + ctx = self.ctxobj.ctx + + lst1_w = [] + for i in range(rffi.getintfield(ctx, 'c_num_typenames')): + s = rffi.charp2str(ctx.c_typenames[i].c_name) + lst1_w.append(space.wrap(s)) + + lst2_w = [] + lst3_w = [] + for i in range(rffi.getintfield(ctx, 'c_num_struct_unions')): + su = ctx.c_struct_unions[i] + if su.c_name[0] == '$': + continue + s = rffi.charp2str(su.c_name) + if rffi.getintfield(su, 'c_flags') & cffi_opcode.F_UNION: + lst_w = lst3_w + else: + lst_w = lst2_w + lst_w.append(space.wrap(s)) + + return space.newtuple([space.newlist(lst1_w), + space.newlist(lst2_w), + space.newlist(lst3_w)]) + + def descr_init_once(self, w_func, w_tag): """\ init_once(function, tag): run function() once. 
More precisely, @@ -746,13 +782,14 @@ getctype = interp2app(W_FFIObject.descr_getctype), init_once = interp2app(W_FFIObject.descr_init_once), integer_const = interp2app(W_FFIObject.descr_integer_const), + list_types = interp2app(W_FFIObject.descr_list_types), memmove = interp2app(W_FFIObject.descr_memmove), new = interp2app(W_FFIObject.descr_new), new_allocator = interp2app(W_FFIObject.descr_new_allocator), new_handle = interp2app(W_FFIObject.descr_new_handle), offsetof = interp2app(W_FFIObject.descr_offsetof), - rawstring = interp2app(W_FFIObject.descr_rawstring), sizeof = interp2app(W_FFIObject.descr_sizeof), string = interp2app(W_FFIObject.descr_string), typeof = interp2app(W_FFIObject.descr_typeof), + unpack = interp2app(W_FFIObject.descr_unpack), **_extras) diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -78,9 +78,9 @@ # ____________________________________________________________ - at unwrap_spec(w_cdata=cdataobj.W_CData) -def rawstring(space, w_cdata): - return w_cdata.ctype.rawstring(w_cdata) + at unwrap_spec(w_cdata=cdataobj.W_CData, length=int) +def unpack(space, w_cdata, length): + return w_cdata.unpack(length) # ____________________________________________________________ diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.5.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.6.0", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): @@ -3515,21 +3515,71 @@ _get_common_types(d) assert d['bool'] == '_Bool' -def 
test_rawstring(): +def test_unpack(): BChar = new_primitive_type("char") BArray = new_array_type(new_pointer_type(BChar), 10) # char[10] - p = newp(BArray, "abc\x00def") - assert rawstring(p) == "abc\x00def\x00\x00\x00" - assert rawstring(p[1:6]) == "bc\x00de" + p = newp(BArray, b"abc\x00def") + p0 = p + assert unpack(p, 10) == b"abc\x00def\x00\x00\x00" + assert unpack(p+1, 5) == b"bc\x00de" BWChar = new_primitive_type("wchar_t") BArray = new_array_type(new_pointer_type(BWChar), 10) # wchar_t[10] p = newp(BArray, u"abc\x00def") - assert rawstring(p) == u"abc\x00def\x00\x00\x00" - assert rawstring(p[1:6]) == u"bc\x00de" - BChar = new_primitive_type("uint8_t") - BArray = new_array_type(new_pointer_type(BChar), 10) # uint8_t[10] - p = newp(BArray, [65 + i for i in range(10)]) - assert rawstring(p) == "ABCDEFGHIJ" + assert unpack(p, 10) == u"abc\x00def\x00\x00\x00" + + for typename, samples in [ + ("uint8_t", [0, 2**8-1]), + ("uint16_t", [0, 2**16-1]), + ("uint32_t", [0, 2**32-1]), + ("uint64_t", [0, 2**64-1]), + ("int8_t", [-2**7, 2**7-1]), + ("int16_t", [-2**15, 2**15-1]), + ("int32_t", [-2**31, 2**31-1]), + ("int64_t", [-2**63, 2**63-1]), + ("_Bool", [0, 1]), + ("float", [0.0, 10.5]), + ("double", [12.34, 56.78]), + ]: + BItem = new_primitive_type(typename) + BArray = new_array_type(new_pointer_type(BItem), 10) + p = newp(BArray, samples) + result = unpack(p, len(samples)) + assert result == samples + for i in range(len(samples)): + assert result[i] == p[i] and type(result[i]) is type(p[i]) # - py.test.raises(TypeError, rawstring, "foobar") - py.test.raises(TypeError, rawstring, p + 1) + BInt = new_primitive_type("int") + py.test.raises(TypeError, unpack, p) + py.test.raises(TypeError, unpack, b"foobar", 6) + py.test.raises(TypeError, unpack, cast(BInt, 42), 1) + # + BPtr = new_pointer_type(BInt) + random_ptr = cast(BPtr, -424344) + other_ptr = cast(BPtr, 54321) + BArray = new_array_type(new_pointer_type(BPtr), None) + lst = unpack(newp(BArray, [random_ptr, 
other_ptr]), 2) + assert lst == [random_ptr, other_ptr] + # + BFunc = new_function_type((BInt, BInt), BInt, False) + BFuncPtr = new_pointer_type(BFunc) + lst = unpack(newp(new_array_type(BFuncPtr, None), 2), 2) + assert len(lst) == 2 + assert not lst[0] and not lst[1] + assert typeof(lst[0]) is BFunc + # + BStruct = new_struct_type("foo") + BStructPtr = new_pointer_type(BStruct) + e = py.test.raises(ValueError, unpack, cast(BStructPtr, 42), 5) + assert str(e.value) == "'foo *' points to items of unknown size" + complete_struct_or_union(BStruct, [('a1', BInt, -1), + ('a2', BInt, -1)]) + array_of_structs = newp(new_array_type(BStructPtr, None), [[4,5], [6,7]]) + lst = unpack(array_of_structs, 2) + assert typeof(lst[0]) is BStruct + assert lst[0].a1 == 4 and lst[1].a2 == 7 + # + py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 0) + py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 10) + # + py.test.raises(ValueError, unpack, p0, -1) + py.test.raises(ValueError, unpack, p, -1) diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -477,15 +477,10 @@ raises(ValueError, ffi.init_once, do_init, "tag") assert seen == [1] * (i + 1) - def test_rawstring(self): + def test_unpack(self): import _cffi_backend as _cffi1_backend ffi = _cffi1_backend.FFI() - p = ffi.new("char[]", "abc\x00def") - assert ffi.rawstring(p) == "abc\x00def\x00" - assert ffi.rawstring(p[1:6]) == "bc\x00de" - p = ffi.new("wchar_t[]", u"abc\x00def") - assert ffi.rawstring(p) == u"abc\x00def\x00" - assert ffi.rawstring(p[1:6]) == u"bc\x00de" - # - raises(TypeError, ffi.rawstring, "foobar") - raises(TypeError, ffi.rawstring, p + 1) + p = ffi.new("char[]", b"abc\x00def") + assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" + p = ffi.new("int[]", [-123456789]) + assert ffi.unpack(p, 1) == [-123456789] diff --git 
a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -1626,3 +1626,150 @@ # a case where 'onerror' is not callable raises(TypeError, ffi.def_extern(name='bar', onerror=42), lambda x: x) + + def test_extern_python_stdcall(self): + ffi, lib = self.prepare(""" + extern "Python" int __stdcall foo(int); + extern "Python" int WINAPI bar(int); + int (__stdcall * mycb1)(int); + int indirect_call(int); + """, 'test_extern_python_stdcall', """ + #ifndef _MSC_VER + # define __stdcall + #endif + static int (__stdcall * mycb1)(int); + static int indirect_call(int x) { + return mycb1(x); + } + """) + # + @ffi.def_extern() + def foo(x): + return x + 42 + @ffi.def_extern() + def bar(x): + return x + 43 + assert lib.foo(100) == 142 + assert lib.bar(100) == 143 + lib.mycb1 = lib.foo + assert lib.mycb1(200) == 242 + assert lib.indirect_call(300) == 342 + + def test_introspect_function(self): + ffi, lib = self.prepare(""" + float f1(double); + """, 'test_introspect_function', """ + float f1(double x) { return x; } + """) + assert dir(lib) == ['f1'] + FUNC = ffi.typeof(lib.f1) + assert FUNC.kind == 'function' + assert FUNC.args[0].cname == 'double' + assert FUNC.result.cname == 'float' + assert ffi.typeof(ffi.addressof(lib, 'f1')) is FUNC + + def test_introspect_global_var(self): + ffi, lib = self.prepare(""" + float g1; + """, 'test_introspect_global_var', """ + float g1; + """) + assert dir(lib) == ['g1'] + FLOATPTR = ffi.typeof(ffi.addressof(lib, 'g1')) + assert FLOATPTR.kind == 'pointer' + assert FLOATPTR.item.cname == 'float' + + def test_introspect_global_var_array(self): + ffi, lib = self.prepare(""" + float g1[100]; + """, 'test_introspect_global_var_array', """ + float g1[100]; + """) + assert dir(lib) == ['g1'] + FLOATARRAYPTR = ffi.typeof(ffi.addressof(lib, 'g1')) + assert FLOATARRAYPTR.kind == 'pointer' + 
assert FLOATARRAYPTR.item.kind == 'array' + assert FLOATARRAYPTR.item.length == 100 + assert ffi.typeof(lib.g1) is FLOATARRAYPTR.item + + def test_introspect_integer_const(self): + ffi, lib = self.prepare("#define FOO 42", + 'test_introspect_integer_const', """ + #define FOO 42 + """) + assert dir(lib) == ['FOO'] + assert lib.FOO == ffi.integer_const('FOO') == 42 + + def test_introspect_typedef(self): + ffi, lib = self.prepare("typedef int foo_t;", + 'test_introspect_typedef', """ + typedef int foo_t; + """) + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'primitive' + assert ffi.typeof('foo_t').cname == 'int' + + def test_introspect_typedef_multiple(self): + ffi, lib = self.prepare(""" + typedef signed char a_t, c_t, g_t, b_t; + """, 'test_introspect_typedef_multiple', """ + typedef signed char a_t, c_t, g_t, b_t; + """) + assert ffi.list_types() == (['a_t', 'b_t', 'c_t', 'g_t'], [], []) + + def test_introspect_struct(self): + ffi, lib = self.prepare(""" + struct foo_s { int a; }; + """, 'test_introspect_struct', """ + struct foo_s { int a; }; + """) + assert ffi.list_types() == ([], ['foo_s'], []) + assert ffi.typeof('struct foo_s').kind == 'struct' + assert ffi.typeof('struct foo_s').cname == 'struct foo_s' + + def test_introspect_union(self): + ffi, lib = self.prepare(""" + union foo_s { int a; }; + """, 'test_introspect_union', """ + union foo_s { int a; }; + """) + assert ffi.list_types() == ([], [], ['foo_s']) + assert ffi.typeof('union foo_s').kind == 'union' + assert ffi.typeof('union foo_s').cname == 'union foo_s' + + def test_introspect_struct_and_typedef(self): + ffi, lib = self.prepare(""" + typedef struct { int a; } foo_t; + """, 'test_introspect_struct_and_typedef', """ + typedef struct { int a; } foo_t; + """) + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'struct' + assert ffi.typeof('foo_t').cname == 'foo_t' + + def test_introspect_included_type(self): + SOURCE = """ + 
typedef signed char schar_t; + struct sint_t { int x; }; + """ + ffi1, lib1 = self.prepare(SOURCE, + "test_introspect_included_type_parent", SOURCE) + ffi2, lib2 = self.prepare("", + "test_introspect_included_type", SOURCE, + includes=[ffi1]) + assert ffi1.list_types() == ffi2.list_types() == ( + ['schar_t'], ['sint_t'], []) + + def test_introspect_order(self): + ffi, lib = self.prepare(""" + union aaa { int a; }; typedef struct ccc { int a; } b; + union g { int a; }; typedef struct cc { int a; } bbb; + union aa { int a; }; typedef struct a { int a; } bb; + """, "test_introspect_order", """ + union aaa { int a; }; typedef struct ccc { int a; } b; + union g { int a; }; typedef struct cc { int a; } bbb; + union aa { int a; }; typedef struct a { int a; } bb; + """) + assert ffi.list_types() == (['b', 'bb', 'bbb'], + ['a', 'cc', 'ccc'], + ['aa', 'aaa', 'g']) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -32,6 +32,7 @@ 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', 'nditer': 'nditer.W_NDIter', + 'broadcast': 'broadcast.W_Broadcast', 'set_docstring': 'support.descr_set_docstring', 'VisibleDeprecationWarning': 'support.W_VisibleDeprecationWarning', diff --git a/pypy/module/micronumpy/broadcast.py b/pypy/module/micronumpy/broadcast.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/broadcast.py @@ -0,0 +1,110 @@ +import pypy.module.micronumpy.constants as NPY +from nditer import ConcreteIter, parse_op_flag, parse_op_arg +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.module.micronumpy import support +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, W_NumpyObject +from rpython.rlib import jit +from strides import 
calculate_broadcast_strides, shape_agreement_multiple + +def descr_new_broadcast(space, w_subtype, __args__): + return W_Broadcast(space, __args__.arguments_w) + +class W_Broadcast(W_NumpyObject): + """ + Implementation of numpy.broadcast. + This class is a simplified version of nditer.W_NDIter with fixed iteration for broadcasted arrays. + """ + + def __init__(self, space, args): + num_args = len(args) + if not (2 <= num_args <= NPY.MAXARGS): + raise oefmt(space.w_ValueError, + "Need at least two and fewer than (%d) array objects.", NPY.MAXARGS) + + self.seq = [convert_to_array(space, w_elem) + for w_elem in args] + + self.op_flags = parse_op_arg(space, 'op_flags', space.w_None, + len(self.seq), parse_op_flag) + + self.shape = shape_agreement_multiple(space, self.seq, shape=None) + self.order = NPY.CORDER + + self.iters = [] + self.index = 0 + + try: + self.size = support.product_check(self.shape) + except OverflowError as e: + raise oefmt(space.w_ValueError, "broadcast dimensions too large.") + for i in range(len(self.seq)): + it = self.get_iter(space, i) + it.contiguous = False + self.iters.append((it, it.reset())) + + self.done = False + pass + + def get_iter(self, space, i): + arr = self.seq[i] + imp = arr.implementation + if arr.is_scalar(): + return ConcreteIter(imp, 1, [], [], [], self.op_flags[i], self) + shape = self.shape + + backward = imp.order != self.order + + r = calculate_broadcast_strides(imp.strides, imp.backstrides, imp.shape, + shape, backward) + + iter_shape = shape + if len(shape) != len(r[0]): + # shape can be shorter when using an external loop, just return a view + iter_shape = imp.shape + return ConcreteIter(imp, imp.get_size(), iter_shape, r[0], r[1], + self.op_flags[i], self) + + def descr_iter(self, space): + return space.wrap(self) + + def descr_get_shape(self, space): + return space.newtuple([space.wrap(i) for i in self.shape]) + + def descr_get_size(self, space): + return space.wrap(self.size) + + def descr_get_index(self, space): + 
return space.wrap(self.index) + + def descr_get_numiter(self, space): + return space.wrap(len(self.iters)) + + @jit.unroll_safe + def descr_next(self, space): + if self.index >= self.size: + self.done = True + raise OperationError(space.w_StopIteration, space.w_None) + self.index += 1 + res = [] + for i, (it, st) in enumerate(self.iters): + res.append(self._get_item(it, st)) + self.iters[i] = (it, it.next(st)) + if len(res) < 2: + return res[0] + return space.newtuple(res) + + def _get_item(self, it, st): + return W_NDimArray(it.getoperand(st)) + + +W_Broadcast.typedef = TypeDef("numpy.broadcast", + __new__=interp2app(descr_new_broadcast), + __iter__=interp2app(W_Broadcast.descr_iter), + next=interp2app(W_Broadcast.descr_next), + shape=GetSetProperty(W_Broadcast.descr_get_shape), + size=GetSetProperty(W_Broadcast.descr_get_size), + index=GetSetProperty(W_Broadcast.descr_get_index), + numiter=GetSetProperty(W_Broadcast.descr_get_numiter), + ) diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -77,6 +77,8 @@ WRAP = 1 RAISE = 2 +MAXARGS = 32 + # These can be requested in constructor functions and tested for ARRAY_C_CONTIGUOUS = 0x0001 ARRAY_F_CONTIGUOUS = 0x0002 diff --git a/pypy/module/micronumpy/test/test_broadcast.py b/pypy/module/micronumpy/test/test_broadcast.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_broadcast.py @@ -0,0 +1,97 @@ +# -*- encoding: utf-8 -*- + +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestArrayBroadcast(BaseNumpyAppTest): + def test_broadcast_for_row_and_column(self): + import numpy as np + x = np.array([[1], [2], [3]]) + y = np.array([4, 5]) + b = list(np.broadcast(x, y)) + assert b == [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)] + + def test_broadcast_properties(self): + import numpy as np + x = np.array([[1], [2], [3]]) + y = np.array([4, 5]) + b 
= np.broadcast(x, y) + + assert b.shape == (3, 2) + assert b.size == 6 + assert b.index == 0 + + b.next() + b.next() + + assert b.shape == (3, 2) + assert b.size == 6 + assert b.index == 2 + + def test_broadcast_from_doctest(self): + """ + Test from numpy.broadcast doctest. + """ + import numpy as np + x = np.array([[1], [2], [3]]) + y = np.array([4, 5, 6]) + reference = np.array([[5., 6., 7.], + [6., 7., 8.], + [7., 8., 9.]]) + + b = np.broadcast(x, y) + out = np.empty(b.shape) + out.flat = [u + v for (u, v) in b] + + assert (reference == out).all() + assert out.dtype == reference.dtype + assert b.shape == reference.shape + + def test_broadcast_linear(self): + import numpy as np + x = np.array([1, 2, 3]) + y = np.array([4, 5, 6]) + b = list(np.broadcast(x, y)) + assert b == [(1, 4), (2, 5), (3, 6)] + assert b[0][0].dtype == x.dtype + + def test_broadcast_failures(self): + import numpy as np + import sys + x = np.array([1, 2, 3]) + y = np.array([4, 5]) + raises(ValueError, np.broadcast, x, y) + a = np.empty(2**16,dtype='int8') + a = a.reshape(-1, 1, 1, 1) + b = a.reshape(1, -1, 1, 1) + c = a.reshape(1, 1, -1, 1) + d = a.reshape(1, 1, 1, -1) + exc = raises(ValueError, np.broadcast, a, b, c, d) + assert exc.value[0] == ('broadcast dimensions too large.') + + def test_broadcast_3_args(self): + import numpy as np + x = np.array([[[1]], [[2]], [[3]]]) + y = np.array([[[40], [50]]]) + z = np.array([[[700, 800]]]) + + b = list(np.broadcast(x, y, z)) + + assert b == [(1, 40, 700), (1, 40, 800), (1, 50, 700), (1, 50, 800), + (2, 40, 700), (2, 40, 800), (2, 50, 700), (2, 50, 800), + (3, 40, 700), (3, 40, 800), (3, 50, 700), (3, 50, 800)] + + def test_number_of_arguments(self): + """ + Test from numpy unit tests. 
+ """ + import numpy as np + arr = np.empty((5,)) + for j in range(35): + arrs = [arr] * j + if j < 2 or j > 32: + exc = raises(ValueError, np.broadcast, *arrs) + assert exc.value[0] == ('Need at least two and fewer than (32) array objects.') + else: + mit = np.broadcast(*arrs) + assert mit.numiter == j diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py @@ -424,3 +424,59 @@ def test_ffi_def_extern(self): ffi = FFI() py.test.raises(ValueError, ffi.def_extern) + + def test_introspect_typedef(self): + ffi = FFI() + ffi.cdef("typedef int foo_t;") + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'primitive' + assert ffi.typeof('foo_t').cname == 'int' + # + ffi.cdef("typedef signed char a_t, c_t, g_t, b_t;") + assert ffi.list_types() == (['a_t', 'b_t', 'c_t', 'foo_t', 'g_t'], + [], []) + + def test_introspect_struct(self): + ffi = FFI() + ffi.cdef("struct foo_s { int a; };") + assert ffi.list_types() == ([], ['foo_s'], []) + assert ffi.typeof('struct foo_s').kind == 'struct' + assert ffi.typeof('struct foo_s').cname == 'struct foo_s' + + def test_introspect_union(self): + ffi = FFI() + ffi.cdef("union foo_s { int a; };") + assert ffi.list_types() == ([], [], ['foo_s']) + assert ffi.typeof('union foo_s').kind == 'union' + assert ffi.typeof('union foo_s').cname == 'union foo_s' + + def test_introspect_struct_and_typedef(self): + ffi = FFI() + ffi.cdef("typedef struct { int a; } foo_t;") + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'struct' + assert ffi.typeof('foo_t').cname == 'foo_t' + + def test_introspect_included_type(self): + ffi1 = FFI() + ffi2 = FFI() + ffi1.cdef("typedef signed char schar_t; struct sint_t { int x; };") + ffi2.include(ffi1) + assert 
ffi1.list_types() == ffi2.list_types() == ( + ['schar_t'], ['sint_t'], []) + + def test_introspect_order(self): + ffi = FFI() + ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;") + ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;") + ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;") + assert ffi.list_types() == (['b', 'bb', 'bbb'], + ['a', 'cc', 'ccc'], + ['aa', 'aaa', 'g']) + + def test_unpack(self): + ffi = FFI() + p = ffi.new("char[]", b"abc\x00def") + assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" + p = ffi.new("int[]", [-123456789]) + assert ffi.unpack(p, 1) == [-123456789] diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -496,3 +496,10 @@ assert i < 20 time.sleep(0.51) assert seen == ['init!', 'oops'] * 3 + +def test_unpack(): + ffi = _cffi1_backend.FFI() + p = ffi.new("char[]", b"abc\x00def") + assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" + p = ffi.new("int[]", [-123456789]) + assert ffi.unpack(p, 1) == [-123456789] diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1744,3 +1744,125 @@ lib.mycb1 = lib.foo assert lib.mycb1(200) == 242 assert lib.indirect_call(300) == 342 + +def test_introspect_function(): + ffi = FFI() + ffi.cdef("float f1(double);") + lib = verify(ffi, 'test_introspect_function', """ + float f1(double x) { return x; } + """) + assert dir(lib) == ['f1'] + FUNC = ffi.typeof(lib.f1) + assert FUNC.kind == 'function' + assert FUNC.args[0].cname == 'double' + assert FUNC.result.cname == 'float' + assert ffi.typeof(ffi.addressof(lib, 'f1')) is FUNC + +def 
test_introspect_global_var(): + ffi = FFI() + ffi.cdef("float g1;") + lib = verify(ffi, 'test_introspect_global_var', """ + float g1; + """) + assert dir(lib) == ['g1'] + FLOATPTR = ffi.typeof(ffi.addressof(lib, 'g1')) + assert FLOATPTR.kind == 'pointer' + assert FLOATPTR.item.cname == 'float' + +def test_introspect_global_var_array(): + ffi = FFI() + ffi.cdef("float g1[100];") + lib = verify(ffi, 'test_introspect_global_var_array', """ + float g1[100]; + """) + assert dir(lib) == ['g1'] + FLOATARRAYPTR = ffi.typeof(ffi.addressof(lib, 'g1')) + assert FLOATARRAYPTR.kind == 'pointer' + assert FLOATARRAYPTR.item.kind == 'array' + assert FLOATARRAYPTR.item.length == 100 + assert ffi.typeof(lib.g1) is FLOATARRAYPTR.item + +def test_introspect_integer_const(): + ffi = FFI() + ffi.cdef("#define FOO 42") + lib = verify(ffi, 'test_introspect_integer_const', """ + #define FOO 42 + """) + assert dir(lib) == ['FOO'] + assert lib.FOO == ffi.integer_const('FOO') == 42 + +def test_introspect_typedef(): + ffi = FFI() + ffi.cdef("typedef int foo_t;") + lib = verify(ffi, 'test_introspect_typedef', """ + typedef int foo_t; + """) + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'primitive' + assert ffi.typeof('foo_t').cname == 'int' + +def test_introspect_typedef_multiple(): + ffi = FFI() + ffi.cdef("typedef signed char a_t, c_t, g_t, b_t;") + lib = verify(ffi, 'test_introspect_typedef_multiple', """ + typedef signed char a_t, c_t, g_t, b_t; + """) + assert ffi.list_types() == (['a_t', 'b_t', 'c_t', 'g_t'], [], []) + +def test_introspect_struct(): + ffi = FFI() + ffi.cdef("struct foo_s { int a; };") + lib = verify(ffi, 'test_introspect_struct', """ + struct foo_s { int a; }; + """) + assert ffi.list_types() == ([], ['foo_s'], []) + assert ffi.typeof('struct foo_s').kind == 'struct' + assert ffi.typeof('struct foo_s').cname == 'struct foo_s' + +def test_introspect_union(): + ffi = FFI() + ffi.cdef("union foo_s { int a; };") + lib = verify(ffi, 
'test_introspect_union', """ + union foo_s { int a; }; + """) + assert ffi.list_types() == ([], [], ['foo_s']) + assert ffi.typeof('union foo_s').kind == 'union' + assert ffi.typeof('union foo_s').cname == 'union foo_s' + +def test_introspect_struct_and_typedef(): + ffi = FFI() + ffi.cdef("typedef struct { int a; } foo_t;") + lib = verify(ffi, 'test_introspect_struct_and_typedef', """ + typedef struct { int a; } foo_t; + """) + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'struct' + assert ffi.typeof('foo_t').cname == 'foo_t' + +def test_introspect_included_type(): + SOURCE = """ + typedef signed char schar_t; + struct sint_t { int x; }; + """ + ffi1 = FFI() + ffi1.cdef(SOURCE) + ffi2 = FFI() + ffi2.include(ffi1) + verify(ffi1, "test_introspect_included_type_parent", SOURCE) + verify(ffi2, "test_introspect_included_type", SOURCE) + assert ffi1.list_types() == ffi2.list_types() == ( + ['schar_t'], ['sint_t'], []) + +def test_introspect_order(): + ffi = FFI() + ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;") + ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;") + ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;") + verify(ffi, "test_introspect_order", """ + union aaa { int a; }; typedef struct ccc { int a; } b; + union g { int a; }; typedef struct cc { int a; } bbb; + union aa { int a; }; typedef struct a { int a; } bb; + """) + assert ffi.list_types() == (['b', 'bb', 'bbb'], + ['a', 'cc', 'ccc'], + ['aa', 'aaa', 'g']) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -695,25 +695,14 @@ assert ffi.string(ffi.cast('enum ee', 11)) == "EE2" assert ffi.string(ffi.cast('enum ee', -10)) == "EE3" # - # try again - ffi.verify("enum ee { EE1=10, EE2, EE3=-10, EE4 };") - assert 
ffi.string(ffi.cast('enum ee', 11)) == "EE2" - # assert ffi.typeof("enum ee").relements == {'EE1': 10, 'EE2': 11, 'EE3': -10} assert ffi.typeof("enum ee").elements == {10: 'EE1', 11: 'EE2', -10: 'EE3'} def test_full_enum(): ffi = FFI() ffi.cdef("enum ee { EE1, EE2, EE3 };") - ffi.verify("enum ee { EE1, EE2, EE3 };") - py.test.raises(VerificationError, ffi.verify, "enum ee { EE1, EE2 };") - # disabled: for now, we always accept and fix transparently constant values - #e = py.test.raises(VerificationError, ffi.verify, - # "enum ee { EE1, EE3, EE2 };") - #assert str(e.value) == 'enum ee: EE2 has the real value 2, not 1' - # extra items cannot be seen and have no bad consequence anyway - lib = ffi.verify("enum ee { EE1, EE2, EE3, EE4 };") - assert lib.EE3 == 2 + lib = ffi.verify("enum ee { EE1, EE2, EE3 };") + assert [lib.EE1, lib.EE2, lib.EE3] == [0, 1, 2] def test_enum_usage(): ffi = FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py @@ -143,7 +143,7 @@ env_extra[envname] = libpath for key, value in sorted(env_extra.items()): if os.environ.get(key) != value: - print '* setting env var %r to %r' % (key, value) + print('* setting env var %r to %r' % (key, value)) os.environ[key] = value def execute(self, name): @@ -165,6 +165,9 @@ class TestBasic(EmbeddingTests): + def test_empty(self): + empty_cffi = self.prepare_module('empty') + def test_basic(self): add1_cffi = self.prepare_module('add1') self.compile('add1-test', [add1_cffi]) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -205,6 +205,12 @@ storage = strategy.erase(list_i) return W_ListObject.from_storage_and_strategy(space, storage, strategy) + @staticmethod + def 
newlist_float(space, list_f): + strategy = space.fromcache(FloatListStrategy) + storage = strategy.erase(list_f) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) + def __repr__(self): """ representation for debugging purposes """ return "%s(%s, %s)" % (self.__class__.__name__, self.strategy, diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -310,6 +310,9 @@ def newlist_int(self, list_i): return W_ListObject.newlist_int(self, list_i) + def newlist_float(self, list_f): + return W_ListObject.newlist_float(self, list_f) + def newdict(self, module=False, instance=False, kwargs=False, strdict=False): return W_DictMultiObject.allocate_and_init_instance( diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4510,7 +4510,10 @@ continue w = t[2].split() if len(w) == 0: - continue + if '' in line: + w = ['UNDEFINED'] + else: + continue words.append(w[0] + ';') print '[[%s]]' % (w[0],) text = ' '.join(words) diff --git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py --- a/rpython/rlib/rposix_stat.py +++ b/rpython/rlib/rposix_stat.py @@ -36,10 +36,9 @@ # - ALL_STAT_FIELDS contains Float fields if the system can retrieve # sub-second timestamps. # - TIMESPEC is defined when the "struct stat" contains st_atim field. 
- -try: +if sys.platform.startswith('linux') or sys.platform.startswith('openbsd'): from rpython.rlib.rposix import TIMESPEC -except ImportError: +else: TIMESPEC = None From pypy.commits at gmail.com Mon Apr 18 01:51:54 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 17 Apr 2016 22:51:54 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: instead of many, only one syscall to write is executed while initializing the jitlog (even slows down the test execution) Message-ID: <571475fa.8a37c20a.12947.0d84@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83726:4f0c9af6057b Date: 2016-04-15 11:18 +0200 http://bitbucket.org/pypy/pypy/changeset/4f0c9af6057b/ Log: instead of many, only one syscall to write is executed while initializing the jitlog (even slows down the test execution) diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -97,9 +97,12 @@ count = len(resoperations.opname) mark = MARK_RESOP_META + content = [encode_le_16bit(len(resoperations.opname))] for opnum, opname in resoperations.opname.items(): - line = encode_le_16bit(opnum) + encode_str(opname.lower()) - cintf.jitlog_write_marked(mark, line, len(line)) + content.append(encode_le_16bit(opnum)) + content.append(encode_str(opname.lower())) + blob = ''.join(content) + cintf.jitlog_write_marked(MARK_RESOP_META, blob, len(blob)) def finish(self): self.cintf.jitlog_teardown() @@ -261,6 +264,7 @@ dump = [] start_offset = ops_offset[op] + assert start_offset >= 0 # end offset is either the last pos in the assembler # or the offset of op2 if op2 is None: From pypy.commits at gmail.com Mon Apr 18 01:51:56 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 17 Apr 2016 22:51:56 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: use _get_vmprof to get a handle on cintf in the logger, this should resolve the translation issue Message-ID: 
<571475fc.8bd31c0a.25a49.3652@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83727:9680c234ff1f Date: 2016-04-15 12:29 +0200 http://bitbucket.org/pypy/pypy/changeset/9680c234ff1f/ Log: use _get_vmprof to get a handle on cintf in the logger, this should resolve the translation issue diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -1,4 +1,4 @@ -from rpython.rlib.rvmprof.rvmprof import cintf +from rpython.rlib.rvmprof.rvmprof import _get_vmprof from rpython.jit.metainterp import resoperation as resoperations from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp.history import ConstInt, ConstFloat @@ -77,7 +77,8 @@ class VMProfJitLogger(object): def __init__(self): - self.cintf = cintf.setup() + self.vmprof = _get_vmprof() + self.cintf = self.vmprof.cintf self.memo = {} def setup_once(self): @@ -86,23 +87,20 @@ self.cintf.jitlog_try_init_using_env() if not self.cintf.jitlog_enabled(): return - VMProfJitLogger._write_header(self.cintf) + blob = VMProfJitLogger.assemble_header() + self.cintf.jitlog_write_marked(MARK_JITLOG_HEADER, blob, len(blob)) @staticmethod @always_inline - def _write_header(cintf): - header = JITLOG_VERSION_16BIT_LE - cintf.jitlog_write_marked(MARK_JITLOG_HEADER, - header, len(header)) - + def assemble_header(): + version = JITLOG_VERSION_16BIT_LE count = len(resoperations.opname) - mark = MARK_RESOP_META - content = [encode_le_16bit(len(resoperations.opname))] + content = [version, chr(MARK_RESOP_META), + encode_le_16bit(count)] for opnum, opname in resoperations.opname.items(): content.append(encode_le_16bit(opnum)) content.append(encode_str(opname.lower())) - blob = ''.join(content) - cintf.jitlog_write_marked(MARK_RESOP_META, blob, len(blob)) + return ''.join(content) def finish(self): self.cintf.jitlog_teardown() diff --git a/rpython/rlib/rvmprof/rvmprof.py 
b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -127,8 +127,9 @@ p_error = self.cintf.jitlog_init(fileno) if p_error: raise VMProfError(rffi.charp2str(p_error)) - from rpython.jit.metainterp.jitlog import VMProfJitLogger - VMProfJitLogger._write_header(self.cintf) + from rpython.jit.metainterp import jitlog + blob = jitlog.VMProfJitLogger.assemble_header() + self.cintf.jitlog_write_marked(jitlog.MARK_JITLOG_HEADER, blob, len(blob)) def disable(self): """Disable vmprof. From pypy.commits at gmail.com Mon Apr 18 01:51:58 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 17 Apr 2016 22:51:58 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: solved translation issue Message-ID: <571475fe.4412c30a.ba3b8.fffff40c@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83728:f605ce9c680a Date: 2016-04-18 07:51 +0200 http://bitbucket.org/pypy/pypy/changeset/f605ce9c680a/ Log: solved translation issue diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -1,4 +1,4 @@ -from rpython.rlib.rvmprof.rvmprof import _get_vmprof +from rpython.rlib.rvmprof import cintf from rpython.jit.metainterp import resoperation as resoperations from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp.history import ConstInt, ConstFloat @@ -30,9 +30,7 @@ # the machine code was patched (e.g. 
guard) MARK_STITCH_BRIDGE = 0x19 -MARK_JIT_LOOP_COUNTER = 0x20 -MARK_JIT_BRIDGE_COUNTER = 0x21 -MARK_JIT_ENTRY_COUNTER = 0x22 +MARK_JITLOG_COUNTER = 0x20 MARK_JITLOG_HEADER = 0x23 MARK_JITLOG_DEBUG_MERGE_POINT = 0x24 @@ -54,7 +52,6 @@ chr((val >> 16) & 0xff), chr((val >> 24) & 0xff)]) - @always_inline def encode_le_64bit(val): return ''.join([chr((val >> 0) & 0xff), @@ -73,12 +70,19 @@ else: return encode_le_64bit(val) +def assemble_header(): + version = JITLOG_VERSION_16BIT_LE + count = len(resoperations.opname) + content = [version, chr(MARK_RESOP_META), + encode_le_16bit(count)] + for opnum, opname in resoperations.opname.items(): + content.append(encode_le_16bit(opnum)) + content.append(encode_str(opname.lower())) + return ''.join(content) class VMProfJitLogger(object): - def __init__(self): - self.vmprof = _get_vmprof() - self.cintf = self.vmprof.cintf + self.cintf = cintf.setup() self.memo = {} def setup_once(self): @@ -87,21 +91,9 @@ self.cintf.jitlog_try_init_using_env() if not self.cintf.jitlog_enabled(): return - blob = VMProfJitLogger.assemble_header() + blob = assemble_header() self.cintf.jitlog_write_marked(MARK_JITLOG_HEADER, blob, len(blob)) - @staticmethod - @always_inline - def assemble_header(): - version = JITLOG_VERSION_16BIT_LE - count = len(resoperations.opname) - content = [version, chr(MARK_RESOP_META), - encode_le_16bit(count)] - for opnum, opname in resoperations.opname.items(): - content.append(encode_le_16bit(opnum)) - content.append(encode_str(opname.lower())) - return ''.join(content) - def finish(self): self.cintf.jitlog_teardown() @@ -116,13 +108,7 @@ le_addr = encode_le_addr(struct.number) # not an address (but a number) but it is a machine word le_count = encode_le_addr(struct.i) - if struct.type == 'l': - tag = MARK_JIT_LOOP_COUNTER - elif struct.type == 'b': - tag = MARK_JIT_BRIDGE_COUNTER - else: - tag = MARK_JIT_ENTRY_COUNTER - self._write_marked(tag, le_addr + le_count) + self._write_marked(MARK_JITLOG_COUNTER, le_addr + 
le_count) def log_trace(self, tag, metainterp_sd, mc, memo=None): if not self.cintf.jitlog_enabled(): diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -7,6 +7,7 @@ from rpython.rtyper.lltypesystem import rffi, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib.rweaklist import RWeakListMixin +from rpython.jit.metainterp import jitlog MAX_FUNC_NAME = 1023 @@ -127,8 +128,7 @@ p_error = self.cintf.jitlog_init(fileno) if p_error: raise VMProfError(rffi.charp2str(p_error)) - from rpython.jit.metainterp import jitlog - blob = jitlog.VMProfJitLogger.assemble_header() + blob = jitlog.assemble_header() self.cintf.jitlog_write_marked(jitlog.MARK_JITLOG_HEADER, blob, len(blob)) def disable(self): From pypy.commits at gmail.com Mon Apr 18 02:27:20 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 17 Apr 2016 23:27:20 -0700 (PDT) Subject: [pypy-commit] pypy default: (s390x) prevent setting the key none in reg_bindings Message-ID: <57147e48.c11a1c0a.379f4.6ace@mx.google.com> Author: Richard Plangger Branch: Changeset: r83729:3701004a39a5 Date: 2016-04-18 08:26 +0200 http://bitbucket.org/pypy/pypy/changeset/3701004a39a5/ Log: (s390x) prevent setting the key none in reg_bindings diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -328,6 +328,16 @@ self.free_regs = [fr for fr in self.free_regs \ if fr is not even and \ fr is not odd] + if not even_var: + even_var = TempVar() + self.longevity[even_var] = (self.position, self.position) + self.temp_boxes.append(even_var) + if not odd_var: + odd_var = TempVar() + self.longevity[odd_var] = (self.position, self.position) + self.temp_boxes.append(odd_var) + assert even_var is not None + assert odd_var is not None self.reg_bindings[even_var] = even 
self.reg_bindings[odd_var] = odd return even, odd @@ -346,6 +356,7 @@ if candidate_var is not None: self._sync_var(candidate_var) self.assembler.regalloc_mov(reg, candidate) + assert var is not None self.reg_bindings[var] = candidate reverse_mapping[reg] = var self.free_regs.append(reg) From pypy.commits at gmail.com Mon Apr 18 02:41:09 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 17 Apr 2016 23:41:09 -0700 (PDT) Subject: [pypy-commit] pypy default: (s390x) marked two tests to be brittle, they some times fail only on one machine I do not have access to... Message-ID: <57148185.46291c0a.d9604.2eec@mx.google.com> Author: Richard Plangger Branch: Changeset: r83730:037d02a07a05 Date: 2016-04-18 08:40 +0200 http://bitbucket.org/pypy/pypy/changeset/037d02a07a05/ Log: (s390x) marked two tests to be brittle, they some times fail only on one machine I do not have access to... diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -246,6 +246,8 @@ class TestUsingFramework(AbstractThreadTests): gcpolicy = 'minimark' + @py.test.mark.xfail(platform.machine() == 's390x', + reason='may fail this test under heavy load') def test_tlref_keepalive(self, no__thread=True): import weakref from rpython.config.translationoption import SUPPORT__THREAD @@ -300,5 +302,7 @@ res = fn() assert res == 42 + @py.test.mark.xfail(platform.machine() == 's390x', + reason='may fail this test under heavy load') def test_tlref_keepalive__thread(self): self.test_tlref_keepalive(no__thread=False) From pypy.commits at gmail.com Mon Apr 18 02:43:48 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 17 Apr 2016 23:43:48 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: merged default Message-ID: <57148224.442cc20a.e7cbf.ffffd7c6@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83731:1b0ebfbc08b8 Date: 2016-04-18 08:43 +0200 
http://bitbucket.org/pypy/pypy/changeset/1b0ebfbc08b8/ Log: merged default diff too long, truncating to 2000 out of 5786 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -111,23 +111,24 @@ Simon Burton Martin Matusiak Konstantin Lopuhin + Stefano Rivera Wenzhu Man John Witulski Laurence Tratt Ivan Sichmann Freitas Greg Price Dario Bertini - Stefano Rivera Mark Pearse Simon Cross + Edd Barrett Andreas Stührk - Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Spenser Bauman Jeremy Thurgood Paweł Piotr Przeradowski - Spenser Bauman + Tobias Pape Paul deGrandis Ilya Osadchiy marky1991 @@ -139,7 +140,7 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Tobias Pape + Mark Young Wanja Saatkamp Gerald Klix Mike Blume @@ -170,9 +171,9 @@ Yichao Yu Rocco Moretti Gintautas Miliauskas + Devin Jeanpierre Michael Twomey Lucian Branescu Mihaila - Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -183,6 +184,7 @@ Victor Stinner Andrews Medina anatoly techtonik + Sergey Matyunin Stuart Williams Jasper Schulz Christian Hudon @@ -217,7 +219,6 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan - Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -225,7 +226,9 @@ Pieter Zieschang Gabriel Lukas Vacek + Kunal Grover Andrew Dalke + Florin Papa Sylvain Thenault Jakub Stasiak Nathan Taylor @@ -240,7 +243,6 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner - Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -252,9 +254,11 @@ Artur Lisiecki Sergey Kishchenko Ignas Mikalajunas + Alecsandru Patrascu Christoph Gerum Martin Blais Lene Wagner + Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su Toni Mattis @@ -291,6 +295,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann + florinpapa Rafał Gałczyński Matt Bogosian Christian Muirhead @@ -305,6 +310,7 @@ Boglarka Vezer Chris Pressey Buck Golemon + Diana Popa Konrad Delong Dinu Gherman Chris Lambacher diff --git a/lib_pypy/cffi.egg-info/PKG-INFO 
b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.5.2 +Version: 1.6.0 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.5.2" -__version_info__ = (1, 5, 2) +__version__ = "1.6.0" +__version_info__ = (1, 6, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.5.2" + "\ncompiled with cffi version: 1.6.0" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -299,6 +299,23 @@ """ return self._backend.string(cdata, maxlen) + def unpack(self, cdata, length): + """Unpack an array of C data of the given length, + returning a Python string/unicode/list. + + If 'cdata' is a pointer to 'char', returns a byte string. + It does not stop at the first null. This is equivalent to: + ffi.buffer(cdata, length)[:] + + If 'cdata' is a pointer to 'wchar_t', returns a unicode string. + 'length' is measured in wchar_t's; it is not the size in bytes. + + If 'cdata' is a pointer to anything else, returns a list of + 'length' items. 
This is a faster equivalent to: + [cdata[i] for i in range(length)] + """ + return self._backend.unpack(cdata, length) + def buffer(self, cdata, size=-1): """Return a read-write buffer object that references the raw C data pointed to by the given 'cdata'. The 'cdata' must be a pointer or @@ -721,6 +738,26 @@ raise ValueError("ffi.def_extern() is only available on API-mode FFI " "objects") + def list_types(self): + """Returns the user type names known to this FFI instance. + This returns a tuple containing three lists of names: + (typedef_names, names_of_structs, names_of_unions) + """ + typedefs = [] + structs = [] + unions = [] + for key in self._parser._declarations: + if key.startswith('typedef '): + typedefs.append(key[8:]) + elif key.startswith('struct '): + structs.append(key[7:]) + elif key.startswith('union '): + unions.append(key[6:]) + typedefs.sort() + structs.sort() + unions.sort() + return (typedefs, structs, unions) + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -1231,7 +1231,7 @@ if c == '\n': return '\\n' return '\\%03o' % ord(c) lines = [] - for line in s.splitlines(True): + for line in s.splitlines(True) or ['']: lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) return ' \\\n'.join(lines) @@ -1319,7 +1319,9 @@ s = s.encode('ascii') super(NativeIO, self).write(s) -def _make_c_or_py_source(ffi, module_name, preamble, target_file): +def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): + if verbose: + print("generating %s" % (target_file,)) recompiler = Recompiler(ffi, module_name, target_is_python=(preamble is None)) recompiler.collect_type_table() @@ -1331,6 +1333,8 @@ with open(target_file, 'r') as f1: if f1.read(len(output) + 1) != output: raise IOError + if verbose: + print("(already up-to-date)") return False # already up-to-date except IOError: tmp_file 
= '%s.~%d' % (target_file, os.getpid()) @@ -1343,12 +1347,14 @@ os.rename(tmp_file, target_file) return True -def make_c_source(ffi, module_name, preamble, target_c_file): +def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): assert preamble is not None - return _make_c_or_py_source(ffi, module_name, preamble, target_c_file) + return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, + verbose) -def make_py_source(ffi, module_name, target_py_file): - return _make_c_or_py_source(ffi, module_name, None, target_py_file) +def make_py_source(ffi, module_name, target_py_file, verbose=False): + return _make_c_or_py_source(ffi, module_name, None, target_py_file, + verbose) def _modname_to_file(outputdir, modname, extension): parts = modname.split('.') @@ -1438,7 +1444,8 @@ target = '*' # ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) - updated = make_c_source(ffi, module_name, preamble, c_file) + updated = make_c_source(ffi, module_name, preamble, c_file, + verbose=compiler_verbose) if call_c_compiler: patchlist = [] cwd = os.getcwd() @@ -1458,7 +1465,8 @@ else: if c_file is None: c_file, _ = _modname_to_file(tmpdir, module_name, '.py') - updated = make_py_source(ffi, module_name, c_file) + updated = make_py_source(ffi, module_name, c_file, + verbose=compiler_verbose) if call_c_compiler: return c_file else: @@ -1484,4 +1492,7 @@ def typeof_disabled(*args, **kwds): raise NotImplementedError ffi._typeof = typeof_disabled + for name in dir(ffi): + if not name.startswith('_') and not hasattr(module.ffi, name): + setattr(ffi, name, NotImplemented) return module.lib diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -81,13 +81,13 @@ Simon Burton Martin Matusiak Konstantin Lopuhin + Stefano Rivera Wenzhu Man John Witulski Laurence Tratt Ivan Sichmann Freitas Greg Price Dario Bertini - Stefano Rivera Mark Pearse Simon Cross Andreas Stührk @@ -95,9 
+95,10 @@ Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Spenser Bauman Jeremy Thurgood Paweł Piotr Przeradowski - Spenser Bauman + Tobias Pape Paul deGrandis Ilya Osadchiy marky1991 @@ -109,7 +110,7 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Tobias Pape + Mark Young Wanja Saatkamp Gerald Klix Mike Blume @@ -140,9 +141,9 @@ Yichao Yu Rocco Moretti Gintautas Miliauskas + Devin Jeanpierre Michael Twomey Lucian Branescu Mihaila - Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -153,6 +154,7 @@ Victor Stinner Andrews Medina anatoly techtonik + Sergey Matyunin Stuart Williams Jasper Schulz Christian Hudon @@ -187,7 +189,6 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan - Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -195,7 +196,9 @@ Pieter Zieschang Gabriel Lukas Vacek + Kunal Grover Andrew Dalke + Florin Papa Sylvain Thenault Jakub Stasiak Nathan Taylor @@ -210,7 +213,6 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner - Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -222,9 +224,11 @@ Artur Lisiecki Sergey Kishchenko Ignas Mikalajunas + Alecsandru Patrascu Christoph Gerum Martin Blais Lene Wagner + Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su Toni Mattis @@ -261,6 +265,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann + florinpapa Rafał Gałczyński Matt Bogosian Christian Muirhead @@ -275,6 +280,7 @@ Boglarka Vezer Chris Pressey Buck Golemon + Diana Popa Konrad Delong Dinu Gherman Chris Lambacher diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.1.0.rst release-5.0.1.rst release-5.0.0.rst release-4.0.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. 
toctree:: whatsnew-head.rst + whatsnew-5.1.0.rst whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.1.0.rst @@ -0,0 +1,136 @@ +======== +PyPy 5.1 +======== + +We have released PyPy 5.1, about a month after PyPy 5.0. +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata, and we now fully support the IBM s390x +architecture. + +You can download the PyPy 5.1 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. 
+ +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s960x** running Linux + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 5.0 released in March 2015) +========================================================= + +* New features: + + * A new jit backend for the IBM s390x, which was a large effort over the past + few months. + + * Add better support for PyUnicodeObject in the C-API compatibility layer + + * Support GNU/kFreeBSD Debian ports in vmprof + + * Add __pypy__._promote + + * Make attrgetter a single type for CPython compatibility + +* Bug Fixes + + * Catch exceptions raised in an exit function + + * Fix a corner case in the JIT + + * Fix edge cases in the cpyext refcounting-compatible semantics + + * Try harder to not emit NEON instructions on ARM processors without NEON + support + + * Improve the rpython posix module system interaction function calls + + * Detect a missing class function implementation instead of calling a random + function + + * Check that PyTupleObjects do not contain any NULLs at the + point of conversion to W_TupleObjects + + * In ctypes, fix _anonymous_ fields of instances + + * Fix JIT issue with unpack() on a Trace which contains half-written operations + + * Fix sandbox startup (a regression in 5.0) + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * Implemented numpy.where for a single argument + + * Indexing by a numpy scalar now returns a scalar + + * Fix transpose(arg) when arg is a sequence + + * Refactor include file handling, now all numpy ndarray, ufunc, and umath + functions 
exported from libpypy.so are declared in pypy_numpy.h, which is + included only when building our fork of numpy + +* Performance improvements: + + * Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting + + * Merge another round of improvements to the warmup performance + + * Cleanup history rewriting in pyjitpl + + * Remove the forced minor collection that occurs when rewriting the + assembler at the start of the JIT backend + +* Internal refactorings: + + * Use a simpler logger to speed up translation + + * Drop vestiges of Python 2.5 support in testing + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -0,0 +1,62 @@ +========================= +What's new in PyPy 5.1 +========================= + +.. this is a revision shortly after release-5.0 +.. startrev: b238b48f9138 + +.. branch: s390x-backend + +The jit compiler backend implementation for the s390x architecutre. +The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. +It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. + +.. branch: remove-py-log + +Replace py.log with something simpler, which should speed up logging + +.. branch: where_1_arg + +Implemented numpy.where for 1 argument (thanks sergem) + +.. branch: fix_indexing_by_numpy_int + +Implement yet another strange numpy indexing compatibility; indexing by a scalar +returns a scalar + +.. branch: fix_transpose_for_list_v3 + +Allow arguments to transpose to be sequences + +.. 
branch: jit-leaner-frontend + +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation +of traces + +.. branch: win32-lib-name + +.. branch: remove-frame-forcing-in-executioncontext + +.. branch: rposix-for-3 + +Wrap more POSIX functions in `rpython.rlib.rposix`. + +.. branch: cleanup-history-rewriting + +A local clean-up in the JIT front-end. + +.. branch: jit-constptr-2 + +Remove the forced minor collection that occurs when rewriting the +assembler at the start of the JIT backend. This is done by emitting +the ConstPtrs in a separate table, and loading from the table. It +gives improved warm-up time and memory usage, and also removes +annoying special-purpose code for pinned pointers. + +.. branch: fix-jitlog + +.. branch: cleanup-includes + +Remove old uneeded numpy headers, what is left is only for testing. Also +generate pypy_numpy.h which exposes functions to directly use micronumpy +ndarray and ufuncs diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,54 +1,16 @@ ========================= -What's new in PyPy 5.0.+ +What's new in PyPy 5.1+ ========================= -.. this is a revision shortly after release-5.0 -.. startrev: b238b48f9138 - -.. branch: s390x-backend - -The jit compiler backend implementation for the s390x architecutre. -The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. -It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. - -.. branch: remove-py-log - -Replace py.log with something simpler, which should speed up logging - -.. branch: where_1_arg - -Implemented numpy.where for 1 argument (thanks sergem) - -.. branch: fix_indexing_by_numpy_int - -Implement yet another strange numpy indexing compatibility; indexing by a scalar -returns a scalar - -.. 
branch: fix_transpose_for_list_v3 - -Allow arguments to transpose to be sequences - -.. branch: jit-leaner-frontend - -Improve the tracing speed in the frontend as well as heapcache by using a more compact representation -of traces - -.. branch: win32-lib-name - -.. branch: remove-frame-forcing-in-executioncontext +.. this is a revision shortly after release-5.1 +.. startrev: 2180e1eaf6f6 .. branch: rposix-for-3 -Wrap more POSIX functions in `rpython.rlib.rposix`. +Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). +This updates the underlying rpython functions with the ones needed for the +py3k branch + +.. branch: numpy_broadcast -.. branch: cleanup-history-rewriting - -A local clean-up in the JIT front-end. - -.. branch: jit-constptr-2 - -Remove the forced minor collection that occurs when rewriting the -assembler at the start of the JIT backend. This is done by emitting -the ConstPtrs in a separate table, and loading from the table. It -gives improved warm-up time and memory usage, and also removes -annoying special-purpose code for pinned pointers. 
+Add broadcast to micronumpy diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1029,6 +1029,9 @@ def newlist_int(self, list_i): return self.newlist([self.wrap(i) for i in list_i]) + def newlist_float(self, list_f): + return self.newlist([self.wrap(f) for f in list_f]) + def newlist_hint(self, sizehint): from pypy.objspace.std.listobject import make_empty_list_with_size return make_empty_list_with_size(self, sizehint) diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -15,9 +15,9 @@ # ____________________________________________________________ -def sorted(lst, cmp=None, key=None, reverse=False): +def sorted(iterable, cmp=None, key=None, reverse=False): "sorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list" - sorted_lst = list(lst) + sorted_lst = list(iterable) sorted_lst.sort(cmp, key, reverse) return sorted_lst diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.5.2" +VERSION = "1.6.0" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: @@ -48,6 +48,7 @@ 'from_buffer': 'func.from_buffer', 'string': 'func.string', + 'unpack': 'func.unpack', 'buffer': 'cbuffer.buffer', 'memmove': 'func.memmove', diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -323,14 +323,18 @@ from pypy.module._cffi_backend import ctypearray ctype = self.ctype if isinstance(ctype, ctypearray.W_CTypeArray): - return 
ctype.ctitem.unpack_list_of_int_items(self) + length = self.get_array_length() + with self as ptr: + return ctype.ctitem.unpack_list_of_int_items(ptr, length) return None def unpackiterable_float(self, space): from pypy.module._cffi_backend import ctypearray ctype = self.ctype if isinstance(ctype, ctypearray.W_CTypeArray): - return ctype.ctitem.unpack_list_of_float_items(self) + length = self.get_array_length() + with self as ptr: + return ctype.ctitem.unpack_list_of_float_items(ptr, length) return None @specialize.argtype(1) @@ -367,6 +371,25 @@ with self as ptr: return W_CDataGCP(self.space, ptr, self.ctype, self, w_destructor) + def unpack(self, length): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + space = self.space + if not self.ctype.is_nonfunc_pointer_or_array: + raise oefmt(space.w_TypeError, + "expected a pointer or array, got '%s'", + self.ctype.name) + if length < 0: + raise oefmt(space.w_ValueError, "'length' cannot be negative") + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + with self as ptr: + if not ptr: + raise oefmt(space.w_RuntimeError, + "cannot use unpack() on %s", + space.str_w(self.repr())) + w_result = ctype.ctitem.unpack_ptr(ctype, ptr, length) + return w_result + class W_CDataMem(W_CData): """This is used only by the results of cffi.cast('int', x) diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -7,11 +7,12 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import ovfcheck from pypy.module._cffi_backend import cdataobj from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray +from pypy.module._cffi_backend import ctypeprim class W_CTypeArray(W_CTypePtrOrArray): diff --git 
a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -49,10 +49,10 @@ def is_unichar_ptr_or_array(self): return False - def unpack_list_of_int_items(self, cdata): + def unpack_list_of_int_items(self, ptr, length): return None - def unpack_list_of_float_items(self, cdata): + def unpack_list_of_float_items(self, ptr, length): return None def pack_list_of_items(self, cdata, w_ob): @@ -127,6 +127,21 @@ raise oefmt(space.w_TypeError, "string(): unexpected cdata '%s' argument", self.name) + def unpack_ptr(self, w_ctypeptr, ptr, length): + # generic implementation, when the type of items is not known to + # be one for which a fast-case exists + space = self.space + itemsize = self.size + if itemsize < 0: + raise oefmt(space.w_ValueError, + "'%s' points to items of unknown size", + w_ctypeptr.name) + result_w = [None] * length + for i in range(length): + result_w[i] = self.convert_to_object(ptr) + ptr = rffi.ptradd(ptr, itemsize) + return space.newlist(result_w) + def add(self, cdata, i): space = self.space raise oefmt(space.w_TypeError, "cannot add a cdata '%s' and a number", diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -87,6 +87,13 @@ return self.space.wrap(s) return W_CType.string(self, cdataobj, maxlen) + def unpack_ptr(self, w_ctypeptr, ptr, length): + result = self.unpack_list_of_int_items(ptr, length) + if result is not None: + return self.space.newlist_int(result) + return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length) + + class W_CTypePrimitiveCharOrUniChar(W_CTypePrimitive): _attrs_ = [] is_primitive_integer = True @@ -125,6 +132,10 @@ value = self._convert_to_char(w_ob) cdata[0] = value + def unpack_ptr(self, w_ctypeptr, ptr, length): + s = rffi.charpsize2str(ptr, length) + return self.space.wrapbytes(s) 
+ # XXX explicitly use an integer type instead of lltype.UniChar here, # because for now the latter is defined as unsigned by RPython (even @@ -171,6 +182,10 @@ value = self._convert_to_unichar(w_ob) rffi.cast(rffi.CWCHARP, cdata)[0] = value + def unpack_ptr(self, w_ctypeptr, ptr, length): + u = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, ptr), length) + return self.space.wrap(u) + class W_CTypePrimitiveSigned(W_CTypePrimitive): _attrs_ = ['value_fits_long', 'value_smaller_than_long'] @@ -221,19 +236,16 @@ def write_raw_integer_data(self, w_cdata, value): w_cdata.write_raw_signed_data(value) - def unpack_list_of_int_items(self, w_cdata): + def unpack_list_of_int_items(self, ptr, length): if self.size == rffi.sizeof(rffi.LONG): from rpython.rlib.rrawarray import populate_list_from_raw_array res = [] - length = w_cdata.get_array_length() - with w_cdata as ptr: - buf = rffi.cast(rffi.LONGP, ptr) - populate_list_from_raw_array(res, buf, length) + buf = rffi.cast(rffi.LONGP, ptr) + populate_list_from_raw_array(res, buf, length) return res elif self.value_smaller_than_long: - res = [0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_list_from_raw_array(res, ptr, self.size) + res = [0] * length + misc.unpack_list_from_raw_array(res, ptr, self.size) return res return None @@ -313,11 +325,10 @@ def write_raw_integer_data(self, w_cdata, value): w_cdata.write_raw_unsigned_data(value) - def unpack_list_of_int_items(self, w_cdata): + def unpack_list_of_int_items(self, ptr, length): if self.value_fits_long: - res = [0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_unsigned_list_from_raw_array(res, ptr, self.size) + res = [0] * length + misc.unpack_unsigned_list_from_raw_array(res, ptr, self.size) return res return None @@ -391,19 +402,16 @@ value = space.float_w(space.float(w_ob)) misc.write_raw_float_data(cdata, value, self.size) - def unpack_list_of_float_items(self, w_cdata): + def unpack_list_of_float_items(self, ptr, length): if 
self.size == rffi.sizeof(rffi.DOUBLE): from rpython.rlib.rrawarray import populate_list_from_raw_array res = [] - length = w_cdata.get_array_length() - with w_cdata as ptr: - buf = rffi.cast(rffi.DOUBLEP, ptr) - populate_list_from_raw_array(res, buf, length) + buf = rffi.cast(rffi.DOUBLEP, ptr) + populate_list_from_raw_array(res, buf, length) return res elif self.size == rffi.sizeof(rffi.FLOAT): - res = [0.0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_cfloat_list_from_raw_array(res, ptr) + res = [0.0] * length + misc.unpack_cfloat_list_from_raw_array(res, ptr) return res return None @@ -421,6 +429,12 @@ return True return W_CTypePrimitive.pack_list_of_items(self, cdata, w_ob) + def unpack_ptr(self, w_ctypeptr, ptr, length): + result = self.unpack_list_of_float_items(ptr, length) + if result is not None: + return self.space.newlist_float(result) + return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length) + class W_CTypePrimitiveLongDouble(W_CTypePrimitiveFloat): _attrs_ = [] diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -542,6 +542,25 @@ return w_cdata.ctype.string(w_cdata, maxlen) + @unwrap_spec(w_cdata=W_CData, length=int) + def descr_unpack(self, w_cdata, length): + """Unpack an array of C data of the given length, +returning a Python string/unicode/list. + +If 'cdata' is a pointer to 'char', returns a byte string. +It does not stop at the first null. This is equivalent to: +ffi.buffer(cdata, length)[:] + +If 'cdata' is a pointer to 'wchar_t', returns a unicode string. +'length' is measured in wchar_t's; it is not the size in bytes. + +If 'cdata' is a pointer to anything else, returns a list of +'length' items. This is a faster equivalent to: +[cdata[i] for i in range(length)]""" + # + return w_cdata.unpack(length) + + def descr_sizeof(self, w_arg): """\ Return the size in bytes of the argument. 
@@ -611,6 +630,38 @@ return w_result + def descr_list_types(self): + """\ +Returns the user type names known to this FFI instance. +This returns a tuple containing three lists of names: +(typedef_names, names_of_structs, names_of_unions)""" + # + space = self.space + ctx = self.ctxobj.ctx + + lst1_w = [] + for i in range(rffi.getintfield(ctx, 'c_num_typenames')): + s = rffi.charp2str(ctx.c_typenames[i].c_name) + lst1_w.append(space.wrap(s)) + + lst2_w = [] + lst3_w = [] + for i in range(rffi.getintfield(ctx, 'c_num_struct_unions')): + su = ctx.c_struct_unions[i] + if su.c_name[0] == '$': + continue + s = rffi.charp2str(su.c_name) + if rffi.getintfield(su, 'c_flags') & cffi_opcode.F_UNION: + lst_w = lst3_w + else: + lst_w = lst2_w + lst_w.append(space.wrap(s)) + + return space.newtuple([space.newlist(lst1_w), + space.newlist(lst2_w), + space.newlist(lst3_w)]) + + def descr_init_once(self, w_func, w_tag): """\ init_once(function, tag): run function() once. More precisely, @@ -731,6 +782,7 @@ getctype = interp2app(W_FFIObject.descr_getctype), init_once = interp2app(W_FFIObject.descr_init_once), integer_const = interp2app(W_FFIObject.descr_integer_const), + list_types = interp2app(W_FFIObject.descr_list_types), memmove = interp2app(W_FFIObject.descr_memmove), new = interp2app(W_FFIObject.descr_new), new_allocator = interp2app(W_FFIObject.descr_new_allocator), @@ -739,4 +791,5 @@ sizeof = interp2app(W_FFIObject.descr_sizeof), string = interp2app(W_FFIObject.descr_string), typeof = interp2app(W_FFIObject.descr_typeof), + unpack = interp2app(W_FFIObject.descr_unpack), **_extras) diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -78,6 +78,12 @@ # ____________________________________________________________ + at unwrap_spec(w_cdata=cdataobj.W_CData, length=int) +def unpack(space, w_cdata, length): + return w_cdata.unpack(length) + +# 
____________________________________________________________ + def _get_types(space): return space.newtuple([space.gettypefor(cdataobj.W_CData), space.gettypefor(ctypeobj.W_CType)]) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.5.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.6.0", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): @@ -3514,3 +3514,72 @@ d = {} _get_common_types(d) assert d['bool'] == '_Bool' + +def test_unpack(): + BChar = new_primitive_type("char") + BArray = new_array_type(new_pointer_type(BChar), 10) # char[10] + p = newp(BArray, b"abc\x00def") + p0 = p + assert unpack(p, 10) == b"abc\x00def\x00\x00\x00" + assert unpack(p+1, 5) == b"bc\x00de" + BWChar = new_primitive_type("wchar_t") + BArray = new_array_type(new_pointer_type(BWChar), 10) # wchar_t[10] + p = newp(BArray, u"abc\x00def") + assert unpack(p, 10) == u"abc\x00def\x00\x00\x00" + + for typename, samples in [ + ("uint8_t", [0, 2**8-1]), + ("uint16_t", [0, 2**16-1]), + ("uint32_t", [0, 2**32-1]), + ("uint64_t", [0, 2**64-1]), + ("int8_t", [-2**7, 2**7-1]), + ("int16_t", [-2**15, 2**15-1]), + ("int32_t", [-2**31, 2**31-1]), + ("int64_t", [-2**63, 2**63-1]), + ("_Bool", [0, 1]), + ("float", [0.0, 10.5]), + ("double", [12.34, 56.78]), + ]: + BItem = new_primitive_type(typename) + BArray = new_array_type(new_pointer_type(BItem), 10) + p = newp(BArray, samples) + result = unpack(p, len(samples)) + assert result == samples + for i in range(len(samples)): + assert result[i] == p[i] and type(result[i]) is type(p[i]) + # + BInt = new_primitive_type("int") + 
py.test.raises(TypeError, unpack, p) + py.test.raises(TypeError, unpack, b"foobar", 6) + py.test.raises(TypeError, unpack, cast(BInt, 42), 1) + # + BPtr = new_pointer_type(BInt) + random_ptr = cast(BPtr, -424344) + other_ptr = cast(BPtr, 54321) + BArray = new_array_type(new_pointer_type(BPtr), None) + lst = unpack(newp(BArray, [random_ptr, other_ptr]), 2) + assert lst == [random_ptr, other_ptr] + # + BFunc = new_function_type((BInt, BInt), BInt, False) + BFuncPtr = new_pointer_type(BFunc) + lst = unpack(newp(new_array_type(BFuncPtr, None), 2), 2) + assert len(lst) == 2 + assert not lst[0] and not lst[1] + assert typeof(lst[0]) is BFunc + # + BStruct = new_struct_type("foo") + BStructPtr = new_pointer_type(BStruct) + e = py.test.raises(ValueError, unpack, cast(BStructPtr, 42), 5) + assert str(e.value) == "'foo *' points to items of unknown size" + complete_struct_or_union(BStruct, [('a1', BInt, -1), + ('a2', BInt, -1)]) + array_of_structs = newp(new_array_type(BStructPtr, None), [[4,5], [6,7]]) + lst = unpack(array_of_structs, 2) + assert typeof(lst[0]) is BStruct + assert lst[0].a1 == 4 and lst[1].a2 == 7 + # + py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 0) + py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 10) + # + py.test.raises(ValueError, unpack, p0, -1) + py.test.raises(ValueError, unpack, p, -1) diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -476,3 +476,11 @@ for i in range(5): raises(ValueError, ffi.init_once, do_init, "tag") assert seen == [1] * (i + 1) + + def test_unpack(self): + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + p = ffi.new("char[]", b"abc\x00def") + assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" + p = ffi.new("int[]", [-123456789]) + assert ffi.unpack(p, 1) == [-123456789] diff --git 
a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -1626,3 +1626,150 @@ # a case where 'onerror' is not callable raises(TypeError, ffi.def_extern(name='bar', onerror=42), lambda x: x) + + def test_extern_python_stdcall(self): + ffi, lib = self.prepare(""" + extern "Python" int __stdcall foo(int); + extern "Python" int WINAPI bar(int); + int (__stdcall * mycb1)(int); + int indirect_call(int); + """, 'test_extern_python_stdcall', """ + #ifndef _MSC_VER + # define __stdcall + #endif + static int (__stdcall * mycb1)(int); + static int indirect_call(int x) { + return mycb1(x); + } + """) + # + @ffi.def_extern() + def foo(x): + return x + 42 + @ffi.def_extern() + def bar(x): + return x + 43 + assert lib.foo(100) == 142 + assert lib.bar(100) == 143 + lib.mycb1 = lib.foo + assert lib.mycb1(200) == 242 + assert lib.indirect_call(300) == 342 + + def test_introspect_function(self): + ffi, lib = self.prepare(""" + float f1(double); + """, 'test_introspect_function', """ + float f1(double x) { return x; } + """) + assert dir(lib) == ['f1'] + FUNC = ffi.typeof(lib.f1) + assert FUNC.kind == 'function' + assert FUNC.args[0].cname == 'double' + assert FUNC.result.cname == 'float' + assert ffi.typeof(ffi.addressof(lib, 'f1')) is FUNC + + def test_introspect_global_var(self): + ffi, lib = self.prepare(""" + float g1; + """, 'test_introspect_global_var', """ + float g1; + """) + assert dir(lib) == ['g1'] + FLOATPTR = ffi.typeof(ffi.addressof(lib, 'g1')) + assert FLOATPTR.kind == 'pointer' + assert FLOATPTR.item.cname == 'float' + + def test_introspect_global_var_array(self): + ffi, lib = self.prepare(""" + float g1[100]; + """, 'test_introspect_global_var_array', """ + float g1[100]; + """) + assert dir(lib) == ['g1'] + FLOATARRAYPTR = ffi.typeof(ffi.addressof(lib, 'g1')) + assert FLOATARRAYPTR.kind == 'pointer' + 
assert FLOATARRAYPTR.item.kind == 'array' + assert FLOATARRAYPTR.item.length == 100 + assert ffi.typeof(lib.g1) is FLOATARRAYPTR.item + + def test_introspect_integer_const(self): + ffi, lib = self.prepare("#define FOO 42", + 'test_introspect_integer_const', """ + #define FOO 42 + """) + assert dir(lib) == ['FOO'] + assert lib.FOO == ffi.integer_const('FOO') == 42 + + def test_introspect_typedef(self): + ffi, lib = self.prepare("typedef int foo_t;", + 'test_introspect_typedef', """ + typedef int foo_t; + """) + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'primitive' + assert ffi.typeof('foo_t').cname == 'int' + + def test_introspect_typedef_multiple(self): + ffi, lib = self.prepare(""" + typedef signed char a_t, c_t, g_t, b_t; + """, 'test_introspect_typedef_multiple', """ + typedef signed char a_t, c_t, g_t, b_t; + """) + assert ffi.list_types() == (['a_t', 'b_t', 'c_t', 'g_t'], [], []) + + def test_introspect_struct(self): + ffi, lib = self.prepare(""" + struct foo_s { int a; }; + """, 'test_introspect_struct', """ + struct foo_s { int a; }; + """) + assert ffi.list_types() == ([], ['foo_s'], []) + assert ffi.typeof('struct foo_s').kind == 'struct' + assert ffi.typeof('struct foo_s').cname == 'struct foo_s' + + def test_introspect_union(self): + ffi, lib = self.prepare(""" + union foo_s { int a; }; + """, 'test_introspect_union', """ + union foo_s { int a; }; + """) + assert ffi.list_types() == ([], [], ['foo_s']) + assert ffi.typeof('union foo_s').kind == 'union' + assert ffi.typeof('union foo_s').cname == 'union foo_s' + + def test_introspect_struct_and_typedef(self): + ffi, lib = self.prepare(""" + typedef struct { int a; } foo_t; + """, 'test_introspect_struct_and_typedef', """ + typedef struct { int a; } foo_t; + """) + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'struct' + assert ffi.typeof('foo_t').cname == 'foo_t' + + def test_introspect_included_type(self): + SOURCE = """ + 
typedef signed char schar_t; + struct sint_t { int x; }; + """ + ffi1, lib1 = self.prepare(SOURCE, + "test_introspect_included_type_parent", SOURCE) + ffi2, lib2 = self.prepare("", + "test_introspect_included_type", SOURCE, + includes=[ffi1]) + assert ffi1.list_types() == ffi2.list_types() == ( + ['schar_t'], ['sint_t'], []) + + def test_introspect_order(self): + ffi, lib = self.prepare(""" + union aaa { int a; }; typedef struct ccc { int a; } b; + union g { int a; }; typedef struct cc { int a; } bbb; + union aa { int a; }; typedef struct a { int a; } bb; + """, "test_introspect_order", """ + union aaa { int a; }; typedef struct ccc { int a; } b; + union g { int a; }; typedef struct cc { int a; } bbb; + union aa { int a; }; typedef struct a { int a; } bb; + """) + assert ffi.list_types() == (['b', 'bb', 'bbb'], + ['a', 'cc', 'ccc'], + ['aa', 'aaa', 'g']) diff --git a/pypy/module/_multiprocessing/test/test_win32.py b/pypy/module/_multiprocessing/test/test_win32.py --- a/pypy/module/_multiprocessing/test/test_win32.py +++ b/pypy/module/_multiprocessing/test/test_win32.py @@ -2,7 +2,8 @@ import sys class AppTestWin32: - spaceconfig = dict(usemodules=('_multiprocessing',)) + spaceconfig = dict(usemodules=('_multiprocessing', + 'signal', '_rawffi', 'binascii')) def setup_class(cls): if sys.platform != "win32": diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -556,18 +556,18 @@ self.w_class = None self.method = method - if self.canoverflow: - assert self.bytes <= rffi.sizeof(rffi.ULONG) - if self.bytes == rffi.sizeof(rffi.ULONG) and not signed and \ - self.unwrap == 'int_w': - # Treat this type as a ULONG - self.unwrap = 'bigint_w' - self.canoverflow = False - def _freeze_(self): # hint for the annotator: track individual constant instances return True +if rffi.sizeof(rffi.UINT) == rffi.sizeof(rffi.ULONG): + # 32 bits: UINT can't safely overflow into a C 
long (rpython int) + # via int_w, handle it like ULONG below + _UINTTypeCode = \ + TypeCode(rffi.UINT, 'bigint_w') +else: + _UINTTypeCode = \ + TypeCode(rffi.UINT, 'int_w', True) types = { 'c': TypeCode(lltype.Char, 'str_w', method=''), 'u': TypeCode(lltype.UniChar, 'unicode_w', method=''), @@ -576,7 +576,7 @@ 'h': TypeCode(rffi.SHORT, 'int_w', True, True), 'H': TypeCode(rffi.USHORT, 'int_w', True), 'i': TypeCode(rffi.INT, 'int_w', True, True), - 'I': TypeCode(rffi.UINT, 'int_w', True), + 'I': _UINTTypeCode, 'l': TypeCode(rffi.LONG, 'int_w', True, True), 'L': TypeCode(rffi.ULONG, 'bigint_w'), # Overflow handled by # rbigint.touint() which diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -99,12 +99,13 @@ for tc in 'BHIL': a = self.array(tc) - vals = [0, 2 ** a.itemsize - 1] + itembits = a.itemsize * 8 + vals = [0, 2 ** itembits - 1] a.fromlist(vals) assert a.tolist() == vals a = self.array(tc.lower()) - vals = [-1 * (2 ** a.itemsize) / 2, (2 ** a.itemsize) / 2 - 1] + vals = [-1 * (2 ** itembits) / 2, (2 ** itembits) / 2 - 1] a.fromlist(vals) assert a.tolist() == vals diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -144,26 +144,14 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir, copy_numpy_headers): +def copy_header_files(dstdir): # XXX: 20 lines of code to recursively copy a directory, really?? 
assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') - for name in ("pypy_decl.h", "pypy_macros.h", "pypy_structmember_decl.h"): + for name in ["pypy_macros.h"] + FUNCTIONS_BY_HEADER.keys(): headers.append(udir.join(name)) _copy_header_files(headers, dstdir) - if copy_numpy_headers: - try: - dstdir.mkdir('numpy') - except py.error.EEXIST: - pass - numpy_dstdir = dstdir / 'numpy' - - numpy_include_dir = include_dir / 'numpy' - numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') - _copy_header_files(numpy_headers, numpy_dstdir) - - class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -231,7 +219,8 @@ wrapper.c_name = cpyext_namespace.uniquename(self.c_name) return wrapper -def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header='pypy_decl.h', +DEFAULT_HEADER = 'pypy_decl.h' +def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER, gil=None, result_borrowed=False): """ Declares a function to be exported. @@ -265,6 +254,8 @@ func_name = func.func_name if header is not None: c_name = None + assert func_name not in FUNCTIONS, ( + "%s already registered" % func_name) else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, @@ -272,10 +263,6 @@ result_borrowed=result_borrowed) func.api_func = api_function - if header is not None: - assert func_name not in FUNCTIONS, ( - "%s already registered" % func_name) - if error is _NOT_SPECIFIED: raise ValueError("function %s has no return value for exceptions" % func) @@ -363,7 +350,8 @@ unwrapper_catch = make_unwrapper(True) unwrapper_raise = make_unwrapper(False) if header is not None: - FUNCTIONS[func_name] = api_function + if header == DEFAULT_HEADER: + FUNCTIONS[func_name] = api_function FUNCTIONS_BY_HEADER.setdefault(header, {})[func_name] = api_function INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests return unwrapper_raise # used in 'normal' RPython code. 
@@ -792,10 +780,11 @@ # Structure declaration code members = [] structindex = {} - for name, func in sorted(FUNCTIONS.iteritems()): - restype, args = c_function_signature(db, func) - members.append('%s (*%s)(%s);' % (restype, name, args)) - structindex[name] = len(structindex) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + restype, args = c_function_signature(db, func) + members.append('%s (*%s)(%s);' % (restype, name, args)) + structindex[name] = len(structindex) structmembers = '\n'.join(members) struct_declaration_code = """\ struct PyPyAPI { @@ -804,7 +793,8 @@ RPY_EXTERN struct PyPyAPI* pypyAPI = &_pypyAPI; """ % dict(members=structmembers) - functions = generate_decls_and_callbacks(db, export_symbols) + functions = generate_decls_and_callbacks(db, export_symbols, + prefix='cpyexttest') global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): @@ -821,6 +811,11 @@ prologue = ("#include \n" "#include \n" "#include \n") + if use_micronumpy: + prologue = ("#include \n" + "#include \n" + "#include \n" + "#include \n") code = (prologue + struct_declaration_code + global_code + @@ -896,13 +891,19 @@ pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') # implement structure initialization code - for name, func in FUNCTIONS.iteritems(): - if name.startswith('cpyext_'): # XXX hack - continue - pypyAPI[structindex[name]] = ctypes.cast( - ll2ctypes.lltype2ctypes(func.get_llhelper(space)), - ctypes.c_void_p) - + #for name, func in FUNCTIONS.iteritems(): + # if name.startswith('cpyext_'): # XXX hack + # continue + # pypyAPI[structindex[name]] = ctypes.cast( + # ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + # ctypes.c_void_p) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + if name.startswith('cpyext_'): # XXX hack + continue + pypyAPI[structindex[name]] = ctypes.cast( + 
ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + ctypes.c_void_p) setup_va_functions(eci) setup_init_functions(eci, translating=False) @@ -995,7 +996,7 @@ pypy_macros_h = udir.join('pypy_macros.h') pypy_macros_h.write('\n'.join(pypy_macros)) -def generate_decls_and_callbacks(db, export_symbols, api_struct=True): +def generate_decls_and_callbacks(db, export_symbols, api_struct=True, prefix=''): "NOT_RPYTHON" # implement function callbacks and generate function decls functions = [] @@ -1010,19 +1011,28 @@ for header_name, header_functions in FUNCTIONS_BY_HEADER.iteritems(): if header_name not in decls: header = decls[header_name] = [] + header.append('#define Signed long /* xxx temporary fix */\n') + header.append('#define Unsigned unsigned long /* xxx temporary fix */\n') else: header = decls[header_name] for name, func in sorted(header_functions.iteritems()): + if header == DEFAULT_HEADER: + _name = name + else: + # this name is not included in pypy_macros.h + _name = mangle_name(prefix, name) + assert _name is not None, 'error converting %s' % name + header.append("#define %s %s" % (name, _name)) restype, args = c_function_signature(db, func) - header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, name, args)) + header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, _name, args)) if api_struct: callargs = ', '.join('arg%d' % (i,) for i in range(len(func.argtypes))) if func.restype is lltype.Void: - body = "{ _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ _pypyAPI.%s(%s); }" % (_name, callargs) else: - body = "{ return _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ return _pypyAPI.%s(%s); }" % (_name, callargs) functions.append('%s %s(%s)\n%s' % (restype, name, args, body)) for name in VA_TP_LIST: name_no_star = process_va_name(name) @@ -1039,8 +1049,10 @@ typ = 'PyObject*' pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) - pypy_decls.append('#undef Signed /* xxx temporary fix */\n') - pypy_decls.append('#undef Unsigned /* xxx temporary fix */\n') + for 
header_name in FUNCTIONS_BY_HEADER.keys(): + header = decls[header_name] + header.append('#undef Signed /* xxx temporary fix */\n') + header.append('#undef Unsigned /* xxx temporary fix */\n') for header_name, header_decls in decls.iteritems(): decl_h = udir.join(header_name) @@ -1147,7 +1159,8 @@ generate_macros(export_symbols, prefix='PyPy') - functions = generate_decls_and_callbacks(db, [], api_struct=False) + functions = generate_decls_and_callbacks(db, [], api_struct=False, + prefix='PyPy') code = "#include \n" + "\n".join(functions) eci = build_eci(False, export_symbols, code) @@ -1189,14 +1202,16 @@ PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **', getter_only=True, declare_as_extern=False) - for name, func in FUNCTIONS.iteritems(): - newname = mangle_name('PyPy', name) or name - deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) - deco(func.get_wrapper(space)) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, + relax=True) + deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include, use_micronumpy) + copy_header_files(trunk_include) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h deleted file mode 100644 --- a/pypy/module/cpyext/include/numpy/__multiarray_api.h +++ /dev/null @@ -1,10 +0,0 @@ - - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -#define import_array() -#define PyArray_New _PyArray_New - diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ 
b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -1,8 +1,6 @@ -/* NDArray object interface - S. H. Muller, 2013/07/26 - * It will be copied by numpy/core/setup.py by install_data to - * site-packages/numpy/core/includes/numpy -*/ +/* NDArray object interface - S. H. Muller, 2013/07/26 */ +/* For testing ndarrayobject only */ #ifndef Py_NDARRAYOBJECT_H #define Py_NDARRAYOBJECT_H @@ -10,13 +8,8 @@ extern "C" { #endif -#include "old_defines.h" #include "npy_common.h" -#include "__multiarray_api.h" - -#define NPY_UNUSED(x) x -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#include "ndarraytypes.h" /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -24,208 +17,20 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_CopyInto _PyArray_CopyInto +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define NPY_MAXDIMS 32 -#ifndef NDARRAYTYPES_H -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -/* data types copied from numpy/ndarraytypes.h - * keep numbers in sync with micronumpy.interp_dtype.DTypeCache - */ -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - /* - * New 1.6 types appended, may be integrated - * into the above in 2.0. 
- */ - NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, - - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR, /* special flag */ - NPY_USERDEF=256, /* leave room for characters */ - - /* The number of types not including the new 1.6 types */ - NPY_NTYPES_ABI_COMPATIBLE=21 -}; - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) -#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) -#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) -#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) - - -/* flags */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 -#define NPY_ARRAY_OWNDATA 0x0004 -#define NPY_ARRAY_FORCECAST 0x0010 -#define NPY_ARRAY_ENSURECOPY 0x0020 -#define NPY_ARRAY_ENSUREARRAY 0x0040 -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 -#define NPY_ARRAY_ALIGNED 0x0100 -#define NPY_ARRAY_NOTSWAPPED 0x0200 -#define NPY_ARRAY_WRITEABLE 0x0400 -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY 
(NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -#define NPY_FARRAY NPY_ARRAY_FARRAY -#define NPY_CARRAY NPY_ARRAY_CARRAY - -#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) - -#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) - -#define PyArray_ISONESEGMENT(arr) (1) -#define PyArray_ISNOTSWAPPED(arr) (1) -#define PyArray_ISBYTESWAPPED(arr) (0) - -#endif - -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CFLOAT -#define NPY_COMPLEX64 NPY_CDOUBLE - - -/* functions */ -#ifndef PyArray_NDIM - -#define PyArray_Check _PyArray_Check -#define PyArray_CheckExact 
_PyArray_CheckExact -#define PyArray_FLAGS _PyArray_FLAGS - -#define PyArray_NDIM _PyArray_NDIM -#define PyArray_DIM _PyArray_DIM -#define PyArray_STRIDE _PyArray_STRIDE -#define PyArray_SIZE _PyArray_SIZE -#define PyArray_ITEMSIZE _PyArray_ITEMSIZE -#define PyArray_NBYTES _PyArray_NBYTES -#define PyArray_TYPE _PyArray_TYPE -#define PyArray_DATA _PyArray_DATA - -#define PyArray_Size PyArray_SIZE -#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) - -#define PyArray_FromAny _PyArray_FromAny -#define PyArray_FromObject _PyArray_FromObject -#define PyArray_ContiguousFromObject PyArray_FromObject -#define PyArray_ContiguousFromAny PyArray_FromObject - -#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) -#define PyArray_FROM_OTF(obj, typenum, requirements) \ - PyArray_FromObject(obj, typenum, 0, 0) - -#define PyArray_New _PyArray_New -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData -#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning - -#define PyArray_EMPTY(nd, dims, type_num, fortran) \ - PyArray_SimpleNew(nd, dims, type_num) +/* functions defined in ndarrayobject.c*/ PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); -#define PyArray_FILLWBYTE _PyArray_FILLWBYTE -#define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto -#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) - -/* Don't use these in loops! 
*/ - -#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0))) - -#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1))) - -#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ - (k)*PyArray_STRIDE(obj,2))) - -#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ - (k)*PyArray_STRIDE(obj,2) + \ - (l)*PyArray_STRIDE(obj,3))) - -#endif #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h --- a/pypy/module/cpyext/include/numpy/ndarraytypes.h +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -1,69 +1,9 @@ #ifndef NDARRAYTYPES_H #define NDARRAYTYPES_H +/* For testing ndarrayobject only */ + #include "numpy/npy_common.h" -//#include "npy_endian.h" -//#include "npy_cpu.h" -//#include "utils.h" - -//for pypy - numpy has lots of typedefs -//for pypy - make life easier, less backward support -#define NPY_1_8_API_VERSION 0x00000008 -#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION -#undef NPY_1_8_API_VERSION - -#define NPY_ENABLE_SEPARATE_COMPILATION 1 -#define NPY_VISIBILITY_HIDDEN - -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -#else - #define NPY_NO_EXPORT static -#endif - -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP - #define NPY_ALLOW_THREADS 1 -#else - #define NPY_ALLOW_THREADS 0 -#endif - - - -/* - * There are several places in the code where an array of dimensions - * is allocated statically. This is the size of that static - * allocation. 
- * - * The array creation itself could have arbitrary dimensions but all - * the places where static allocation is used would need to be changed - * to dynamic (including inside of several structures) - */ - -#define NPY_MAXDIMS 32 -#define NPY_MAXARGS 32 - -/* Used for Converter Functions "O&" code in ParseTuple */ -#define NPY_FAIL 0 -#define NPY_SUCCEED 1 - -/* - * Binary compatibility version number. This number is increased - * whenever the C-API is changed such that binary compatibility is - * broken, i.e. whenever a recompile of extension modules is needed. - */ -#define NPY_VERSION NPY_ABI_VERSION - -/* - * Minor API version. This number is increased whenever a change is - * made to the C-API -- whether it breaks binary compatibility or not. - * Some changes, such as adding a function pointer to the end of the - * function table, can be made without breaking binary compatibility. - * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) - * would be increased. Whenever binary compatibility is broken, both - * NPY_VERSION and NPY_FEATURE_VERSION should be increased. 
- */ -#define NPY_FEATURE_VERSION NPY_API_VERSION enum NPY_TYPES { NPY_BOOL=0, NPY_BYTE, NPY_UBYTE, @@ -91,18 +31,6 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; -/* basetype array priority */ -#define NPY_PRIORITY 0.0 - -/* default subtype priority */ -#define NPY_SUBTYPE_PRIORITY 1.0 - -/* default scalar priority */ -#define NPY_SCALAR_PRIORITY -1000000.0 - -/* How many floating point types are there (excluding half) */ -#define NPY_NUM_FLOATTYPE 3 - /* * These characters correspond to the array type and the struct * module @@ -157,27 +85,6 @@ }; typedef enum { - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2 -} NPY_SORTKIND; -#define NPY_NSORTS (NPY_MERGESORT + 1) - - -typedef enum { - NPY_INTROSELECT=0, -} NPY_SELECTKIND; -#define NPY_NSELECTS (NPY_INTROSELECT + 1) - - -typedef enum { - NPY_SEARCHLEFT=0, - NPY_SEARCHRIGHT=1 -} NPY_SEARCHSIDE; -#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) - - -typedef enum { NPY_NOSCALAR=-1, NPY_BOOL_SCALAR, NPY_INTPOS_SCALAR, @@ -186,7 +93,6 @@ NPY_COMPLEX_SCALAR, NPY_OBJECT_SCALAR } NPY_SCALARKIND; -#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) /* For specifying array memory layout or iteration order */ typedef enum { @@ -200,729 +106,6 @@ NPY_KEEPORDER=2 } NPY_ORDER; From pypy.commits at gmail.com Mon Apr 18 03:36:49 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Apr 2016 00:36:49 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: renamed variable Message-ID: <57148e91.22c8c20a.2183d.7951@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83732:1d7a7e714afa Date: 2016-04-18 09:33 +0200 http://bitbucket.org/pypy/pypy/changeset/1d7a7e714afa/ Log: renamed variable diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -690,13 +690,11 @@ greenargs = unwrap_greenkey(greenkey) fn = support.maybe_on_top_of_llinterp(rtyper, get_location_ptr) llres = fn(*greenargs) - if not 
we_are_translated() and isinstance(llres, tuple): - return llres - return llres # TODO hltuple? + return llres self.get_location = get_location # - get_location_ptr = self.jitdriver_sd._get_printable_location_ptr - if get_location_ptr is None: + printable_loc_ptr = self.jitdriver_sd._printable_loc_ptr + if printable_loc_ptr is None: missing = '(%s: no get_printable_location)' % drivername def get_location_str(greenkey): return missing @@ -712,7 +710,7 @@ if not have_debug_prints_for("jit-"): return missing greenargs = unwrap_greenkey(greenkey) - fn = support.maybe_on_top_of_llinterp(rtyper, get_location_ptr) + fn = support.maybe_on_top_of_llinterp(rtyper, printable_loc_ptr) llres = fn(*greenargs) if not we_are_translated() and isinstance(llres, str): return llres From pypy.commits at gmail.com Mon Apr 18 03:44:14 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 18 Apr 2016 00:44:14 -0700 (PDT) Subject: [pypy-commit] cffi default: Issue #254: extern "Python+C" Message-ID: <5714904e.43ecc20a.80f19.0d57@mx.google.com> Author: Armin Rigo Branch: Changeset: r2667:ecacf3063964 Date: 2016-04-18 09:44 +0200 http://bitbucket.org/cffi/cffi/changeset/ecacf3063964/ Log: Issue #254: extern "Python+C" diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -29,7 +29,8 @@ _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") _r_cdecl = re.compile(r"\b__cdecl\b") -_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.') +_r_extern_python = re.compile(r'\bextern\s*"' + r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.') _r_star_const_space = re.compile( # matches "* const " r"[*]\s*((const|volatile|restrict)\b\s*)+") @@ -88,6 +89,12 @@ # void __cffi_extern_python_start; # int foo(int); # void __cffi_extern_python_stop; + # + # input: `extern "Python+C" int foo(int);` + # output: + # void __cffi_extern_python_plus_c_start; + # int foo(int); + # void __cffi_extern_python_stop; parts = [] 
while True: match = _r_extern_python.search(csource) @@ -98,7 +105,10 @@ #print ''.join(parts)+csource #print '=>' parts.append(csource[:match.start()]) - parts.append('void __cffi_extern_python_start; ') + if 'C' in match.group(1): + parts.append('void __cffi_extern_python_plus_c_start; ') + else: + parts.append('void __cffi_extern_python_start; ') if csource[endpos] == '{': # grouping variant closing = csource.find('}', endpos) @@ -302,7 +312,7 @@ break # try: - self._inside_extern_python = False + self._inside_extern_python = '__cffi_extern_python_stop' for decl in iterator: if isinstance(decl, pycparser.c_ast.Decl): self._parse_decl(decl) @@ -376,8 +386,10 @@ tp = self._get_type_pointer(tp, quals) if self._options.get('dllexport'): tag = 'dllexport_python ' - elif self._inside_extern_python: + elif self._inside_extern_python == '__cffi_extern_python_start': tag = 'extern_python ' + elif self._inside_extern_python == '__cffi_extern_python_plus_c_start': + tag = 'extern_python_plus_c ' else: tag = 'function ' self._declare(tag + decl.name, tp) @@ -421,11 +433,9 @@ # hack: `extern "Python"` in the C source is replaced # with "void __cffi_extern_python_start;" and # "void __cffi_extern_python_stop;" - self._inside_extern_python = not self._inside_extern_python - assert self._inside_extern_python == ( - decl.name == '__cffi_extern_python_start') + self._inside_extern_python = decl.name else: - if self._inside_extern_python: + if self._inside_extern_python !='__cffi_extern_python_stop': raise api.CDefError( "cannot declare constants or " "variables with 'extern \"Python\"'") diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -1145,11 +1145,11 @@ def _generate_cpy_extern_python_collecttype(self, tp, name): assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) + _generate_cpy_dllexport_python_collecttype = \ + _generate_cpy_extern_python_plus_c_collecttype = \ + 
_generate_cpy_extern_python_collecttype - def _generate_cpy_dllexport_python_collecttype(self, tp, name): - self._generate_cpy_extern_python_collecttype(tp, name) - - def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): + def _extern_python_decl(self, tp, name, tag_and_space): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' @@ -1184,11 +1184,7 @@ size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) - if dllexport: - tag = 'CFFI_DLLEXPORT' - else: - tag = 'static' - prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) + prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') @@ -1206,8 +1202,14 @@ prnt() self._num_externpy += 1 + def _generate_cpy_extern_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'static ') + def _generate_cpy_dllexport_python_decl(self, tp, name): - self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ') + + def _generate_cpy_extern_python_plus_c_decl(self, tp, name): + self._extern_python_decl(tp, name, '') def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: @@ -1220,8 +1222,9 @@ self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) - def _generate_cpy_dllexport_python_ctx(self, tp, name): - self._generate_cpy_extern_python_ctx(tp, name) + _generate_cpy_dllexport_python_ctx = \ + _generate_cpy_extern_python_plus_c_ctx = \ + _generate_cpy_extern_python_ctx def _string_literal(self, s): def _char_repr(c): diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1510,7 +1510,9 @@ void boz(void); } """) - lib = verify(ffi, 'test_extern_python_1', "") + lib = verify(ffi, 
'test_extern_python_1', """ + static void baz(int, int); /* forward */ + """) assert ffi.typeof(lib.bar) == ffi.typeof("int(*)(int, int)") with FdWriteCapture() as f: res = lib.bar(4, 5) @@ -1744,6 +1746,35 @@ assert lib.mycb1(200) == 242 assert lib.indirect_call(300) == 342 +def test_extern_python_plus_c(): + ffi = FFI() + ffi.cdef(""" + extern "Python+C" int foo(int); + extern "C +\tPython" int bar(int); + int call_me(int); + """) + lib = verify(ffi, 'test_extern_python_plus_c', """ + int foo(int); + #ifdef __GNUC__ + __attribute__((visibility("hidden"))) + #endif + int bar(int); + + static int call_me(int x) { + return foo(x) - bar(x); + } + """) + # + @ffi.def_extern() + def foo(x): + return x * 42 + @ffi.def_extern() + def bar(x): + return x * 63 + assert lib.foo(100) == 4200 + assert lib.bar(100) == 6300 + assert lib.call_me(100) == -2100 + def test_introspect_function(): ffi = FFI() ffi.cdef("float f1(double);") From pypy.commits at gmail.com Mon Apr 18 03:52:57 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 18 Apr 2016 00:52:57 -0700 (PDT) Subject: [pypy-commit] cffi default: Still delay this a bit longer Message-ID: <57149259.0976c20a.c0c21.fffffd86@mx.google.com> Author: Armin Rigo Branch: Changeset: r2668:5d45c8d8df09 Date: 2016-04-18 09:53 +0200 http://bitbucket.org/cffi/cffi/changeset/5d45c8d8df09/ Log: Still delay this a bit longer diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py --- a/testing/cffi0/backend_tests.py +++ b/testing/cffi0/backend_tests.py @@ -1352,8 +1352,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 6): - py.test.skip("re-enable me in version 1.6") + if __version_info__ < (1, 7): + py.test.skip("re-enable me in version 1.7") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " From pypy.commits at gmail.com Mon Apr 18 03:54:37 2016 
From: pypy.commits at gmail.com (arigo) Date: Mon, 18 Apr 2016 00:54:37 -0700 (PDT) Subject: [pypy-commit] pypy default: import cffi/5d45c8d8df09 and add missing file Message-ID: <571492bd.432f1c0a.9bf72.ffffa30e@mx.google.com> Author: Armin Rigo Branch: Changeset: r83733:2b8b83be158d Date: 2016-04-18 09:54 +0200 http://bitbucket.org/pypy/pypy/changeset/2b8b83be158d/ Log: import cffi/5d45c8d8df09 and add missing file diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -29,7 +29,8 @@ _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") _r_cdecl = re.compile(r"\b__cdecl\b") -_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.') +_r_extern_python = re.compile(r'\bextern\s*"' + r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.') _r_star_const_space = re.compile( # matches "* const " r"[*]\s*((const|volatile|restrict)\b\s*)+") @@ -88,6 +89,12 @@ # void __cffi_extern_python_start; # int foo(int); # void __cffi_extern_python_stop; + # + # input: `extern "Python+C" int foo(int);` + # output: + # void __cffi_extern_python_plus_c_start; + # int foo(int); + # void __cffi_extern_python_stop; parts = [] while True: match = _r_extern_python.search(csource) @@ -98,7 +105,10 @@ #print ''.join(parts)+csource #print '=>' parts.append(csource[:match.start()]) - parts.append('void __cffi_extern_python_start; ') + if 'C' in match.group(1): + parts.append('void __cffi_extern_python_plus_c_start; ') + else: + parts.append('void __cffi_extern_python_start; ') if csource[endpos] == '{': # grouping variant closing = csource.find('}', endpos) @@ -302,7 +312,7 @@ break # try: - self._inside_extern_python = False + self._inside_extern_python = '__cffi_extern_python_stop' for decl in iterator: if isinstance(decl, pycparser.c_ast.Decl): self._parse_decl(decl) @@ -376,8 +386,10 @@ tp = self._get_type_pointer(tp, quals) if self._options.get('dllexport'): tag = 
'dllexport_python ' - elif self._inside_extern_python: + elif self._inside_extern_python == '__cffi_extern_python_start': tag = 'extern_python ' + elif self._inside_extern_python == '__cffi_extern_python_plus_c_start': + tag = 'extern_python_plus_c ' else: tag = 'function ' self._declare(tag + decl.name, tp) @@ -421,11 +433,9 @@ # hack: `extern "Python"` in the C source is replaced # with "void __cffi_extern_python_start;" and # "void __cffi_extern_python_stop;" - self._inside_extern_python = not self._inside_extern_python - assert self._inside_extern_python == ( - decl.name == '__cffi_extern_python_start') + self._inside_extern_python = decl.name else: - if self._inside_extern_python: + if self._inside_extern_python !='__cffi_extern_python_stop': raise api.CDefError( "cannot declare constants or " "variables with 'extern \"Python\"'") diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -1145,11 +1145,11 @@ def _generate_cpy_extern_python_collecttype(self, tp, name): assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) + _generate_cpy_dllexport_python_collecttype = \ + _generate_cpy_extern_python_plus_c_collecttype = \ + _generate_cpy_extern_python_collecttype - def _generate_cpy_dllexport_python_collecttype(self, tp, name): - self._generate_cpy_extern_python_collecttype(tp, name) - - def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): + def _extern_python_decl(self, tp, name, tag_and_space): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' @@ -1184,11 +1184,7 @@ size_of_a = 'sizeof(%s) > %d ? 
sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) - if dllexport: - tag = 'CFFI_DLLEXPORT' - else: - tag = 'static' - prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) + prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') @@ -1206,8 +1202,14 @@ prnt() self._num_externpy += 1 + def _generate_cpy_extern_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'static ') + def _generate_cpy_dllexport_python_decl(self, tp, name): - self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ') + + def _generate_cpy_extern_python_plus_c_decl(self, tp, name): + self._extern_python_decl(tp, name, '') def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: @@ -1220,8 +1222,9 @@ self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) - def _generate_cpy_dllexport_python_ctx(self, tp, name): - self._generate_cpy_extern_python_ctx(tp, name) + _generate_cpy_dllexport_python_ctx = \ + _generate_cpy_extern_python_plus_c_ctx = \ + _generate_cpy_extern_python_ctx def _string_literal(self, s): def _char_repr(c): diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1353,8 +1353,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 6): - py.test.skip("re-enable me in version 1.6") + if __version_info__ < (1, 7): + py.test.skip("re-enable me in version 1.7") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " diff --git 
a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1511,7 +1511,9 @@ void boz(void); } """) - lib = verify(ffi, 'test_extern_python_1', "") + lib = verify(ffi, 'test_extern_python_1', """ + static void baz(int, int); /* forward */ + """) assert ffi.typeof(lib.bar) == ffi.typeof("int(*)(int, int)") with FdWriteCapture() as f: res = lib.bar(4, 5) @@ -1745,6 +1747,35 @@ assert lib.mycb1(200) == 242 assert lib.indirect_call(300) == 342 +def test_extern_python_plus_c(): + ffi = FFI() + ffi.cdef(""" + extern "Python+C" int foo(int); + extern "C +\tPython" int bar(int); + int call_me(int); + """) + lib = verify(ffi, 'test_extern_python_plus_c', """ + int foo(int); + #ifdef __GNUC__ + __attribute__((visibility("hidden"))) + #endif + int bar(int); + + static int call_me(int x) { + return foo(x) - bar(x); + } + """) + # + @ffi.def_extern() + def foo(x): + return x * 42 + @ffi.def_extern() + def bar(x): + return x * 63 + assert lib.foo(100) == 4200 + assert lib.bar(100) == 6300 + assert lib.call_me(100) == -2100 + def test_introspect_function(): ffi = FFI() ffi.cdef("float f1(double);") diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/empty.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/empty.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/empty.py @@ -0,0 +1,11 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api("") + +ffi.set_source("_empty_cffi", "") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) From pypy.commits at gmail.com Mon Apr 18 04:08:47 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Apr 2016 01:08:47 -0700 (PDT) Subject: [pypy-commit] pypy default: rewritten test, sleeping up to ~25 second to wait for the initial 
thread to start Message-ID: <5714960f.89cbc20a.418ed.4db7@mx.google.com> Author: Richard Plangger Branch: Changeset: r83734:e7c64f263b43 Date: 2016-04-18 10:07 +0200 http://bitbucket.org/pypy/pypy/changeset/e7c64f263b43/ Log: rewritten test, sleeping up to ~25 second to wait for the initial thread to start diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -246,8 +246,6 @@ class TestUsingFramework(AbstractThreadTests): gcpolicy = 'minimark' - @py.test.mark.xfail(platform.machine() == 's390x', - reason='may fail this test under heavy load') def test_tlref_keepalive(self, no__thread=True): import weakref from rpython.config.translationoption import SUPPORT__THREAD @@ -289,7 +287,12 @@ wr_from_thread.seen = False start_new_thread(thread_entry_point, ()) wr1 = f() - time.sleep(0.5) + count = 0 + while True: + time.sleep(0.5) + if wr_from_thread.seen or count >= 50: + break + count += 1 assert wr_from_thread.seen is True wr2 = wr_from_thread.wr import gc; gc.collect() # wr2() should be collected here @@ -302,7 +305,5 @@ res = fn() assert res == 42 - @py.test.mark.xfail(platform.machine() == 's390x', - reason='may fail this test under heavy load') def test_tlref_keepalive__thread(self): self.test_tlref_keepalive(no__thread=False) From pypy.commits at gmail.com Mon Apr 18 04:51:57 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Apr 2016 01:51:57 -0700 (PDT) Subject: [pypy-commit] pypy default: (s390x) overwrote already defined variable, lead to wrong assignment Message-ID: <5714a02d.2976c20a.11f84.4380@mx.google.com> Author: Richard Plangger Branch: Changeset: r83735:99bd59dfb2dd Date: 2016-04-18 10:50 +0200 http://bitbucket.org/pypy/pypy/changeset/99bd59dfb2dd/ Log: (s390x) overwrote already defined variable, lead to wrong assignment diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- 
a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -310,34 +310,26 @@ # uff! in this case, we need to move a forbidden var to another register assert len(forbidden_vars) <= 8 # otherwise it is NOT possible to complete even, odd = r.r2, r.r3 - even_var = reverse_mapping.get(even, None) - odd_var = reverse_mapping.get(odd, None) - if even_var: - if even_var in forbidden_vars: - self._relocate_forbidden_variable(even, even_var, reverse_mapping, + old_even_var = reverse_mapping.get(even, None) + old_odd_var = reverse_mapping.get(odd, None) + if old_even_var: + if old_even_var in forbidden_vars: + self._relocate_forbidden_variable(even, old_even_var, reverse_mapping, forbidden_vars, odd) else: - self._sync_var(even_var) - if odd_var: - if odd_var in forbidden_vars: - self._relocate_forbidden_variable(odd, odd_var, reverse_mapping, + self._sync_var(old_even_var) + del self.reg_bindings[old_even_var] + if old_odd_var: + if old_odd_var in forbidden_vars: + self._relocate_forbidden_variable(odd, old_odd_var, reverse_mapping, forbidden_vars, even) else: - self._sync_var(odd_var) + self._sync_var(old_odd_var) + del self.reg_bindings[old_odd_var] self.free_regs = [fr for fr in self.free_regs \ if fr is not even and \ fr is not odd] - if not even_var: - even_var = TempVar() - self.longevity[even_var] = (self.position, self.position) - self.temp_boxes.append(even_var) - if not odd_var: - odd_var = TempVar() - self.longevity[odd_var] = (self.position, self.position) - self.temp_boxes.append(odd_var) - assert even_var is not None - assert odd_var is not None self.reg_bindings[even_var] = even self.reg_bindings[odd_var] = odd return even, odd @@ -345,6 +337,12 @@ return even, odd def _relocate_forbidden_variable(self, reg, var, reverse_mapping, forbidden_vars, forbidden_reg): + if len(self.free_regs) > 0: + candidate = self.free_regs.pop() + self.assembler.regalloc_mov(reg, candidate) + self.reg_bindings[var] = candidate + 
reverse_mapping[candidate] = var + for candidate in r.MANAGED_REGS: # move register of var to another register # thus it is not allowed to bei either reg or forbidden_reg @@ -355,10 +353,11 @@ if not candidate_var or candidate_var not in forbidden_vars: if candidate_var is not None: self._sync_var(candidate_var) + del self.reg_bindings[candidate_var] self.assembler.regalloc_mov(reg, candidate) assert var is not None self.reg_bindings[var] = candidate - reverse_mapping[reg] = var + reverse_mapping[candidate] = var self.free_regs.append(reg) break else: From pypy.commits at gmail.com Mon Apr 18 06:11:39 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 18 Apr 2016 03:11:39 -0700 (PDT) Subject: [pypy-commit] pypy default: Rewrite resource.py to use cffi instead of ctypes_config_cache Message-ID: <5714b2db.939d1c0a.2cf81.ffffc9c6@mx.google.com> Author: Armin Rigo Branch: Changeset: r83736:09ae247ce9d7 Date: 2016-04-18 11:48 +0200 http://bitbucket.org/pypy/pypy/changeset/09ae247ce9d7/ Log: Rewrite resource.py to use cffi instead of ctypes_config_cache diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_resource_build.py @@ -0,0 +1,109 @@ +from cffi import FFI + +ffi = FFI() + +# Note: we don't directly expose 'struct timeval' or 'struct rlimit' + + +rlimit_consts = ''' +RLIMIT_CPU +RLIMIT_FSIZE +RLIMIT_DATA +RLIMIT_STACK +RLIMIT_CORE +RLIMIT_NOFILE +RLIMIT_OFILE +RLIMIT_VMEM +RLIMIT_AS +RLIMIT_RSS +RLIMIT_NPROC +RLIMIT_MEMLOCK +RLIMIT_SBSIZE +RLIM_INFINITY +RUSAGE_SELF +RUSAGE_CHILDREN +RUSAGE_BOTH +'''.split() + +rlimit_consts = ['#ifdef %s\n\t{"%s", %s},\n#endif\n' % (s, s, s) + for s in rlimit_consts] + + +ffi.set_source("_resource_cffi", """ +#include + +static const struct my_rlimit_def { + const char *name; + long long value; +} my_rlimit_consts[] = { +$RLIMIT_CONSTS + { NULL, 0 } +}; + +#define doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001) + +static int my_getrusage(int 
who, struct rusage *result, double times[2]) +{ + if (getrusage(who, result) == -1) + return -1; + times[0] = doubletime(result->ru_utime); + times[1] = doubletime(result->ru_stime); + return 0; +} + +static int my_getrlimit(int resource, long long result[2]) +{ + struct rlimit rl; + if (getrlimit(resource, &rl) == -1) + return -1; + result[0] = rl.rlim_cur; + result[1] = rl.rlim_max; + return 0; +} + +static int my_setrlimit(int resource, long long cur, long long max) +{ + struct rlimit rl; + rl.rlim_cur = cur & RLIM_INFINITY; + rl.rlim_max = max & RLIM_INFINITY; + return setrlimit(resource, &rl); +} + +""".replace('$RLIMIT_CONSTS', ''.join(rlimit_consts))) + + +ffi.cdef(""" + +#define RLIM_NLIMITS ... + +const struct my_rlimit_def { + const char *name; + long long value; +} my_rlimit_consts[]; + +struct rusage { + long ru_maxrss; + long ru_ixrss; + long ru_idrss; + long ru_isrss; + long ru_minflt; + long ru_majflt; + long ru_nswap; + long ru_inblock; + long ru_oublock; + long ru_msgsnd; + long ru_msgrcv; + long ru_nsignals; + long ru_nvcsw; + long ru_nivcsw; + ...; +}; + +void my_getrusage(int who, struct rusage *result, double times[2]); +int my_getrlimit(int resource, long long result[2]); +int my_setrlimit(int resource, long long cur, long long max); +""") + + +if __name__ == "__main__": + ffi.compile() diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py --- a/lib_pypy/pwd.py +++ b/lib_pypy/pwd.py @@ -1,4 +1,4 @@ -# ctypes implementation: Victor Stinner, 2008-05-08 +# indirectly based on ctypes implementation: Victor Stinner, 2008-05-08 """ This module provides access to the Unix password database. It is available on all Unix versions. 
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py --- a/lib_pypy/resource.py +++ b/lib_pypy/resource.py @@ -1,15 +1,8 @@ -import sys -if sys.platform == 'win32': - raise ImportError('resource module not available for win32') +"""http://docs.python.org/library/resource""" -# load the platform-specific cache made by running resource.ctc.py -from ctypes_config_cache._resource_cache import * - -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno -from ctypes import Structure, c_int, c_long, byref, POINTER +from _resource_cffi import ffi, lib from errno import EINVAL, EPERM -import _structseq +import _structseq, os try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f @@ -18,106 +11,47 @@ class error(Exception): pass +class struct_rusage: + """struct_rusage: Result from getrusage. -# Read required libc functions -_getrusage = libc.getrusage -_getrlimit = libc.getrlimit -_setrlimit = libc.setrlimit -try: - _getpagesize = libc.getpagesize - _getpagesize.argtypes = () - _getpagesize.restype = c_int -except AttributeError: - from os import sysconf - _getpagesize = None +This object may be accessed either as a tuple of + (utime,stime,maxrss,ixrss,idrss,isrss,minflt,majflt, + nswap,inblock,oublock,msgsnd,msgrcv,nsignals,nvcsw,nivcsw) +or via the attributes ru_utime, ru_stime, ru_maxrss, and so on.""" - -class timeval(Structure): - _fields_ = ( - ("tv_sec", c_long), - ("tv_usec", c_long), - ) - def __str__(self): - return "(%s, %s)" % (self.tv_sec, self.tv_usec) - - def __float__(self): - return self.tv_sec + self.tv_usec/1000000.0 - -class _struct_rusage(Structure): - _fields_ = ( - ("ru_utime", timeval), - ("ru_stime", timeval), - ("ru_maxrss", c_long), - ("ru_ixrss", c_long), - ("ru_idrss", c_long), - ("ru_isrss", c_long), - ("ru_minflt", c_long), - ("ru_majflt", c_long), - ("ru_nswap", c_long), - ("ru_inblock", c_long), - ("ru_oublock", c_long), - ("ru_msgsnd", c_long), - ("ru_msgrcv", c_long), - 
("ru_nsignals", c_long), - ("ru_nvcsw", c_long), - ("ru_nivcsw", c_long), - ) - -_getrusage.argtypes = (c_int, POINTER(_struct_rusage)) -_getrusage.restype = c_int - - -class struct_rusage: __metaclass__ = _structseq.structseqtype - ru_utime = _structseq.structseqfield(0) - ru_stime = _structseq.structseqfield(1) - ru_maxrss = _structseq.structseqfield(2) - ru_ixrss = _structseq.structseqfield(3) - ru_idrss = _structseq.structseqfield(4) - ru_isrss = _structseq.structseqfield(5) - ru_minflt = _structseq.structseqfield(6) - ru_majflt = _structseq.structseqfield(7) - ru_nswap = _structseq.structseqfield(8) - ru_inblock = _structseq.structseqfield(9) - ru_oublock = _structseq.structseqfield(10) - ru_msgsnd = _structseq.structseqfield(11) - ru_msgrcv = _structseq.structseqfield(12) - ru_nsignals = _structseq.structseqfield(13) - ru_nvcsw = _structseq.structseqfield(14) - ru_nivcsw = _structseq.structseqfield(15) - - at builtinify -def rlimit_check_bounds(rlim_cur, rlim_max): - if rlim_cur > rlim_t_max: - raise ValueError("%d does not fit into rlim_t" % rlim_cur) - if rlim_max > rlim_t_max: - raise ValueError("%d does not fit into rlim_t" % rlim_max) - -class rlimit(Structure): - _fields_ = ( - ("rlim_cur", rlim_t), - ("rlim_max", rlim_t), - ) - -_getrlimit.argtypes = (c_int, POINTER(rlimit)) -_getrlimit.restype = c_int -_setrlimit.argtypes = (c_int, POINTER(rlimit)) -_setrlimit.restype = c_int + ru_utime = _structseq.structseqfield(0, "user time used") + ru_stime = _structseq.structseqfield(1, "system time used") + ru_maxrss = _structseq.structseqfield(2, "max. 
resident set size") + ru_ixrss = _structseq.structseqfield(3, "shared memory size") + ru_idrss = _structseq.structseqfield(4, "unshared data size") + ru_isrss = _structseq.structseqfield(5, "unshared stack size") + ru_minflt = _structseq.structseqfield(6, "page faults not requiring I/O") + ru_majflt = _structseq.structseqfield(7, "page faults requiring I/O") + ru_nswap = _structseq.structseqfield(8, "number of swap outs") + ru_inblock = _structseq.structseqfield(9, "block input operations") + ru_oublock = _structseq.structseqfield(10, "block output operations") + ru_msgsnd = _structseq.structseqfield(11, "IPC messages sent") + ru_msgrcv = _structseq.structseqfield(12, "IPC messages received") + ru_nsignals = _structseq.structseqfield(13,"signals received") + ru_nvcsw = _structseq.structseqfield(14, "voluntary context switches") + ru_nivcsw = _structseq.structseqfield(15, "involuntary context switches") @builtinify def getrusage(who): - ru = _struct_rusage() - ret = _getrusage(who, byref(ru)) + ru = ffi.new("struct rusage *") + times = ffi.new("double[2]") + ret = lib.my_getrusage(who, ru, times) if ret == -1: - errno = get_errno() + errno = ffi.errno if errno == EINVAL: raise ValueError("invalid who parameter") raise error(errno) return struct_rusage(( - float(ru.ru_utime), - float(ru.ru_stime), + times[0], + times[1], ru.ru_maxrss, ru.ru_ixrss, ru.ru_idrss, @@ -136,47 +70,49 @@ @builtinify def getrlimit(resource): - if not(0 <= resource < RLIM_NLIMITS): + if not (0 <= resource < lib.RLIM_NLIMITS): return ValueError("invalid resource specified") - rlim = rlimit() - ret = _getrlimit(resource, byref(rlim)) - if ret == -1: - errno = get_errno() - raise error(errno) - return (rlim.rlim_cur, rlim.rlim_max) + result = ffi.new("long long[2]") + if lib.my_getrlimit(resource, result) == -1: + raise error(ffi.errno) + return (result[0], result[1]) @builtinify -def setrlimit(resource, rlim): - if not(0 <= resource < RLIM_NLIMITS): +def setrlimit(resource, limits): + if not (0 
<= resource < lib.RLIM_NLIMITS): return ValueError("invalid resource specified") - rlimit_check_bounds(*rlim) - rlim = rlimit(rlim[0], rlim[1]) - ret = _setrlimit(resource, byref(rlim)) - if ret == -1: - errno = get_errno() - if errno == EINVAL: - return ValueError("current limit exceeds maximum limit") - elif errno == EPERM: - return ValueError("not allowed to raise maximum limit") + limits = tuple(limits) + if len(limits) != 2: + raise ValueError("expected a tuple of 2 integers") + + if lib.my_setrlimit(resource, limits[0], limits[1]) == -1: + if ffi.errno == EINVAL: + raise ValueError("current limit exceeds maximum limit") + elif ffi.errno == EPERM: + raise ValueError("not allowed to raise maximum limit") else: - raise error(errno) + raise error(ffi.errno) + @builtinify def getpagesize(): - if _getpagesize: - return _getpagesize() - else: - try: - return sysconf("SC_PAGE_SIZE") - except ValueError: - # Irix 5.3 has _SC_PAGESIZE, but not _SC_PAGE_SIZE - return sysconf("SC_PAGESIZE") + return os.sysconf("SC_PAGESIZE") -__all__ = ALL_CONSTANTS + ( - 'error', 'timeval', 'struct_rusage', 'rlimit', - 'getrusage', 'getrlimit', 'setrlimit', 'getpagesize', + +def _setup(): + all_constants = [] + p = lib.my_rlimit_consts + while p.name: + name = ffi.string(p.name) + globals()[name] = int(p.value) + all_constants.append(name) + p += 1 + return all_constants + +__all__ = tuple(_setup()) + ( + 'error', 'getpagesize', 'struct_rusage', + 'getrusage', 'getrlimit', 'setrlimit', ) - -del ALL_CONSTANTS +del _setup diff --git a/pypy/module/test_lib_pypy/test_resource.py b/pypy/module/test_lib_pypy/test_resource.py --- a/pypy/module/test_lib_pypy/test_resource.py +++ b/pypy/module/test_lib_pypy/test_resource.py @@ -1,44 +1,46 @@ from __future__ import absolute_import -from lib_pypy.ctypes_config_cache import rebuild -from pypy.module.test_lib_pypy.support import import_lib_pypy - import os if os.name != 'posix': skip('resource.h only available on unix') -class AppTestResource: +from 
lib_pypy import resource - spaceconfig = dict(usemodules=('_rawffi', 'itertools')) - def setup_class(cls): - rebuild.rebuild_one('resource.ctc.py') - cls.w_resource = import_lib_pypy(cls.space, 'resource', - 'No resource module available') +def test_getrusage(): + x = resource.getrusage(resource.RUSAGE_SELF) + assert len(x) == 16 + assert x[0] == x[-16] == x.ru_utime + assert x[1] == x[-15] == x.ru_stime + assert x[2] == x[-14] == x.ru_maxrss + assert x[3] == x[-13] == x.ru_ixrss + assert x[4] == x[-12] == x.ru_idrss + assert x[5] == x[-11] == x.ru_isrss + assert x[6] == x[-10] == x.ru_minflt + assert x[7] == x[-9] == x.ru_majflt + assert x[8] == x[-8] == x.ru_nswap + assert x[9] == x[-7] == x.ru_inblock + assert x[10] == x[-6] == x.ru_oublock + assert x[11] == x[-5] == x.ru_msgsnd + assert x[12] == x[-4] == x.ru_msgrcv + assert x[13] == x[-3] == x.ru_nsignals + assert x[14] == x[-2] == x.ru_nvcsw + assert x[15] == x[-1] == x.ru_nivcsw + for i in range(16): + if i < 2: + expected_type = float + else: + expected_type = (int, long) + assert isinstance(x[i], expected_type) - def test_resource(self): - resource = self.resource - x = resource.getrusage(resource.RUSAGE_SELF) - assert len(x) == 16 - assert x[0] == x[-16] == x.ru_utime - assert x[1] == x[-15] == x.ru_stime - assert x[2] == x[-14] == x.ru_maxrss - assert x[3] == x[-13] == x.ru_ixrss - assert x[4] == x[-12] == x.ru_idrss - assert x[5] == x[-11] == x.ru_isrss - assert x[6] == x[-10] == x.ru_minflt - assert x[7] == x[-9] == x.ru_majflt - assert x[8] == x[-8] == x.ru_nswap - assert x[9] == x[-7] == x.ru_inblock - assert x[10] == x[-6] == x.ru_oublock - assert x[11] == x[-5] == x.ru_msgsnd - assert x[12] == x[-4] == x.ru_msgrcv - assert x[13] == x[-3] == x.ru_nsignals - assert x[14] == x[-2] == x.ru_nvcsw - assert x[15] == x[-1] == x.ru_nivcsw - for i in range(16): - if i < 2: - expected_type = float - else: - expected_type = (int, long) - assert isinstance(x[i], expected_type) +def test_getrlimit(): + x = 
resource.getrlimit(resource.RLIMIT_CPU) + assert isinstance(x, tuple) + assert len(x) == 2 + assert isinstance(x[0], (int, long)) + assert isinstance(x[1], (int, long)) + +def test_setrlimit(): + # minimal "does not crash" test + x = resource.getrlimit(resource.RLIMIT_CPU) + resource.setrlimit(resource.RLIMIT_CPU, x) From pypy.commits at gmail.com Mon Apr 18 06:11:41 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 18 Apr 2016 03:11:41 -0700 (PDT) Subject: [pypy-commit] pypy default: port _pypy_wait Message-ID: <5714b2dd.cfa81c0a.ba99e.3367@mx.google.com> Author: Armin Rigo Branch: Changeset: r83737:0c48102c8709 Date: 2016-04-18 12:09 +0200 http://bitbucket.org/pypy/pypy/changeset/0c48102c8709/ Log: port _pypy_wait diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -1,51 +1,22 @@ -from resource import _struct_rusage, struct_rusage -from ctypes import CDLL, c_int, POINTER, byref -from ctypes.util import find_library +from resource import ffi, lib, _make_struct_rusage __all__ = ["wait3", "wait4"] -libc = CDLL(find_library("c")) -c_wait3 = libc.wait3 -c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] -c_wait3.restype = c_int - -c_wait4 = libc.wait4 -c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)] -c_wait4.restype = c_int - -def create_struct_rusage(c_struct): - return struct_rusage(( - float(c_struct.ru_utime), - float(c_struct.ru_stime), - c_struct.ru_maxrss, - c_struct.ru_ixrss, - c_struct.ru_idrss, - c_struct.ru_isrss, - c_struct.ru_minflt, - c_struct.ru_majflt, - c_struct.ru_nswap, - c_struct.ru_inblock, - c_struct.ru_oublock, - c_struct.ru_msgsnd, - c_struct.ru_msgrcv, - c_struct.ru_nsignals, - c_struct.ru_nvcsw, - c_struct.ru_nivcsw)) def wait3(options): - status = c_int() - _rusage = _struct_rusage() - pid = c_wait3(byref(status), c_int(options), byref(_rusage)) + status = ffi.new("int *") + ru = ffi.new("struct rusage *") + pid = 
lib.wait3(status, options, ru) - rusage = create_struct_rusage(_rusage) + rusage = _make_struct_rusage(ru) - return pid, status.value, rusage + return pid, status[0], rusage def wait4(pid, options): - status = c_int() - _rusage = _struct_rusage() - pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage)) + status = ffi.new("int *") + ru = ffi.new("struct rusage *") + pid = lib.wait4(pid, status, options, ru) - rusage = create_struct_rusage(_rusage) + rusage = _make_struct_rusage(ru) - return pid, status.value, rusage + return pid, status[0], rusage diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py --- a/lib_pypy/_resource_build.py +++ b/lib_pypy/_resource_build.py @@ -30,7 +30,10 @@ ffi.set_source("_resource_cffi", """ +#include +#include #include +#include static const struct my_rlimit_def { const char *name; @@ -42,13 +45,14 @@ #define doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001) -static int my_getrusage(int who, struct rusage *result, double times[2]) +static double my_utime(struct rusage *input) { - if (getrusage(who, result) == -1) - return -1; - times[0] = doubletime(result->ru_utime); - times[1] = doubletime(result->ru_stime); - return 0; + return doubletime(input->ru_utime); +} + +static double my_stime(struct rusage *input) +{ + return doubletime(input->ru_stime); } static int my_getrlimit(int resource, long long result[2]) @@ -99,9 +103,14 @@ ...; }; -void my_getrusage(int who, struct rusage *result, double times[2]); +static double my_utime(struct rusage *); +static double my_stime(struct rusage *); +void getrusage(int who, struct rusage *result); int my_getrlimit(int resource, long long result[2]); int my_setrlimit(int resource, long long cur, long long max); + +int wait3(int *status, int options, struct rusage *rusage); +int wait4(int pid, int *status, int options, struct rusage *rusage); """) diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py --- a/lib_pypy/resource.py +++ 
b/lib_pypy/resource.py @@ -38,20 +38,10 @@ ru_nvcsw = _structseq.structseqfield(14, "voluntary context switches") ru_nivcsw = _structseq.structseqfield(15, "involuntary context switches") - - at builtinify -def getrusage(who): - ru = ffi.new("struct rusage *") - times = ffi.new("double[2]") - ret = lib.my_getrusage(who, ru, times) - if ret == -1: - errno = ffi.errno - if errno == EINVAL: - raise ValueError("invalid who parameter") - raise error(errno) +def _make_struct_rusage(ru): return struct_rusage(( - times[0], - times[1], + lib.my_utime(ru), + lib.my_stime(ru), ru.ru_maxrss, ru.ru_ixrss, ru.ru_idrss, @@ -69,6 +59,15 @@ )) @builtinify +def getrusage(who): + ru = ffi.new("struct rusage *") + if lib.getrusage(who, ru) == -1: + if ffi.errno == EINVAL: + raise ValueError("invalid who parameter") + raise error(ffi.errno) + return _make_struct_rusage(ru) + + at builtinify def getrlimit(resource): if not (0 <= resource < lib.RLIM_NLIMITS): return ValueError("invalid resource specified") diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -1,51 +1,36 @@ -# Generates the resource cache (it might be there already, but maybe not) +# Assumes that _resource_cffi is there already from __future__ import absolute_import import os +import py +from pypy.module.test_lib_pypy import test_resource # side-effect: skip() -import py -from lib_pypy.ctypes_config_cache import rebuild -from pypy.module.test_lib_pypy.support import import_lib_pypy +from lib_pypy import _pypy_wait +def test_os_wait3(): + wait3 = _pypy_wait.wait3 + exit_status = 0x33 + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait3(0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) 
-class AppTestOsWait: - - spaceconfig = dict(usemodules=('_rawffi', 'itertools')) - - def setup_class(cls): - if not hasattr(os, "fork"): - py.test.skip("Need fork() to test wait3/wait4()") - rebuild.rebuild_one('resource.ctc.py') - cls.w__pypy_wait = import_lib_pypy( - cls.space, '_pypy_wait', - '_pypy_wait not supported on this platform') - - def test_os_wait3(self): - import os - wait3 = self._pypy_wait.wait3 - exit_status = 0x33 - child = os.fork() - if child == 0: # in child - os._exit(exit_status) - else: - pid, status, rusage = wait3(0) - assert child == pid - assert os.WIFEXITED(status) - assert os.WEXITSTATUS(status) == exit_status - assert isinstance(rusage.ru_utime, float) - assert isinstance(rusage.ru_maxrss, int) - - def test_os_wait4(self): - import os - wait4 = self._pypy_wait.wait4 - exit_status = 0x33 - child = os.fork() - if child == 0: # in child - os._exit(exit_status) - else: - pid, status, rusage = wait4(child, 0) - assert child == pid - assert os.WIFEXITED(status) - assert os.WEXITSTATUS(status) == exit_status - assert isinstance(rusage.ru_utime, float) - assert isinstance(rusage.ru_maxrss, int) +def test_os_wait4(): + wait4 = _pypy_wait.wait4 + exit_status = 0x33 + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait4(child, 0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) diff --git a/pypy/module/test_lib_pypy/test_resource.py b/pypy/module/test_lib_pypy/test_resource.py --- a/pypy/module/test_lib_pypy/test_resource.py +++ b/pypy/module/test_lib_pypy/test_resource.py @@ -4,7 +4,10 @@ if os.name != 'posix': skip('resource.h only available on unix') -from lib_pypy import resource +try: + from lib_pypy import resource +except ImportError as e: + skip(str(e)) def test_getrusage(): From pypy.commits at gmail.com Mon Apr 18 06:11:43 2016 From: pypy.commits 
at gmail.com (arigo) Date: Mon, 18 Apr 2016 03:11:43 -0700 (PDT) Subject: [pypy-commit] pypy default: Kill ctypes_config_cache Message-ID: <5714b2df.972e1c0a.de35b.ffffbfdc@mx.google.com> Author: Armin Rigo Branch: Changeset: r83738:f6b866583555 Date: 2016-04-18 12:11 +0200 http://bitbucket.org/pypy/pypy/changeset/f6b866583555/ Log: Kill ctypes_config_cache diff --git a/lib_pypy/ctypes_config_cache/__init__.py b/lib_pypy/ctypes_config_cache/__init__.py deleted file mode 100644 diff --git a/lib_pypy/ctypes_config_cache/dumpcache.py b/lib_pypy/ctypes_config_cache/dumpcache.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/dumpcache.py +++ /dev/null @@ -1,21 +0,0 @@ -import sys, os -from ctypes_configure import dumpcache - -def dumpcache2(basename, config): - size = 32 if sys.maxint <= 2**32 else 64 - filename = '_%s_%s_.py' % (basename, size) - dumpcache.dumpcache(__file__, filename, config) - # - filename = os.path.join(os.path.dirname(__file__), - '_%s_cache.py' % (basename,)) - g = open(filename, 'w') - print >> g, '''\ -import sys -_size = 32 if sys.maxint <= 2**32 else 64 -# XXX relative import, should be removed together with -# XXX the relative imports done e.g. by lib_pypy/pypy_test/test_hashlib -_mod = __import__("_%s_%%s_" %% (_size,), - globals(), locals(), ["*"]) -globals().update(_mod.__dict__)\ -''' % (basename,) - g.close() diff --git a/lib_pypy/ctypes_config_cache/locale.ctc.py b/lib_pypy/ctypes_config_cache/locale.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/locale.ctc.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -'ctypes_configure' source for _locale.py. -Run this to rebuild _locale_cache.py. 
-""" - -from ctypes_configure.configure import (configure, ExternalCompilationInfo, - ConstantInteger, DefinedConstantInteger, SimpleType, check_eci) -import dumpcache - -# ____________________________________________________________ - -_CONSTANTS = [ - 'LC_CTYPE', - 'LC_TIME', - 'LC_COLLATE', - 'LC_MONETARY', - 'LC_MESSAGES', - 'LC_NUMERIC', - 'LC_ALL', - 'CHAR_MAX', -] - -class LocaleConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['limits.h', - 'locale.h']) -for key in _CONSTANTS: - setattr(LocaleConfigure, key, DefinedConstantInteger(key)) - -config = configure(LocaleConfigure, noerr=True) -for key, value in config.items(): - if value is None: - del config[key] - _CONSTANTS.remove(key) - -# ____________________________________________________________ - -eci = ExternalCompilationInfo(includes=['locale.h', 'langinfo.h']) -HAS_LANGINFO = check_eci(eci) - -if HAS_LANGINFO: - # list of all possible names - langinfo_names = [ - "RADIXCHAR", "THOUSEP", "CRNCYSTR", - "D_T_FMT", "D_FMT", "T_FMT", "AM_STR", "PM_STR", - "CODESET", "T_FMT_AMPM", "ERA", "ERA_D_FMT", "ERA_D_T_FMT", - "ERA_T_FMT", "ALT_DIGITS", "YESEXPR", "NOEXPR", "_DATE_FMT", - ] - for i in range(1, 8): - langinfo_names.append("DAY_%d" % i) - langinfo_names.append("ABDAY_%d" % i) - for i in range(1, 13): - langinfo_names.append("MON_%d" % i) - langinfo_names.append("ABMON_%d" % i) - - class LanginfoConfigure: - _compilation_info_ = eci - nl_item = SimpleType('nl_item') - for key in langinfo_names: - setattr(LanginfoConfigure, key, DefinedConstantInteger(key)) - - langinfo_config = configure(LanginfoConfigure) - for key, value in langinfo_config.items(): - if value is None: - del langinfo_config[key] - langinfo_names.remove(key) - config.update(langinfo_config) - _CONSTANTS += langinfo_names - -# ____________________________________________________________ - -config['ALL_CONSTANTS'] = tuple(_CONSTANTS) -config['HAS_LANGINFO'] = HAS_LANGINFO -dumpcache.dumpcache2('locale', config) diff 
--git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py deleted file mode 100755 --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ /dev/null @@ -1,56 +0,0 @@ -#! /usr/bin/env python -# Run this script to rebuild all caches from the *.ctc.py files. - -import os, sys - -sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))) - -import py - -_dirpath = os.path.dirname(__file__) or os.curdir - -from rpython.tool.ansi_print import AnsiLogger -log = AnsiLogger("ctypes_config_cache") - - -def rebuild_one(name): - filename = os.path.join(_dirpath, name) - d = {'__file__': filename} - path = sys.path[:] - try: - sys.path.insert(0, _dirpath) - execfile(filename, d) - finally: - sys.path[:] = path - -def try_rebuild(): - size = 32 if sys.maxint <= 2**32 else 64 - # remove the files '_*_size_.py' - left = {} - for p in os.listdir(_dirpath): - if p.startswith('_') and (p.endswith('_%s_.py' % size) or - p.endswith('_%s_.pyc' % size)): - os.unlink(os.path.join(_dirpath, p)) - elif p.startswith('_') and (p.endswith('_.py') or - p.endswith('_.pyc')): - for i in range(2, len(p)-4): - left[p[:i]] = True - # remove the files '_*_cache.py' if there is no '_*_*_.py' left around - for p in os.listdir(_dirpath): - if p.startswith('_') and (p.endswith('_cache.py') or - p.endswith('_cache.pyc')): - if p[:-9] not in left: - os.unlink(os.path.join(_dirpath, p)) - # - for p in os.listdir(_dirpath): - if p.endswith('.ctc.py'): - try: - rebuild_one(p) - except Exception, e: - log.ERROR("Running %s:\n %s: %s" % ( - os.path.join(_dirpath, p), - e.__class__.__name__, e)) - - -if __name__ == '__main__': - try_rebuild() diff --git a/lib_pypy/ctypes_config_cache/resource.ctc.py b/lib_pypy/ctypes_config_cache/resource.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/resource.ctc.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -'ctypes_configure' source for resource.py. -Run this to rebuild _resource_cache.py. 
-""" - - -from ctypes import sizeof -import dumpcache -from ctypes_configure.configure import (configure, - ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger, - SimpleType) - - -_CONSTANTS = ( - 'RLIM_INFINITY', - 'RLIM_NLIMITS', -) -_OPTIONAL_CONSTANTS = ( - 'RLIMIT_CPU', - 'RLIMIT_FSIZE', - 'RLIMIT_DATA', - 'RLIMIT_STACK', - 'RLIMIT_CORE', - 'RLIMIT_RSS', - 'RLIMIT_NPROC', - 'RLIMIT_NOFILE', - 'RLIMIT_OFILE', - 'RLIMIT_MEMLOCK', - 'RLIMIT_AS', - 'RLIMIT_LOCKS', - 'RLIMIT_SIGPENDING', - 'RLIMIT_MSGQUEUE', - 'RLIMIT_NICE', - 'RLIMIT_RTPRIO', - 'RLIMIT_VMEM', - - 'RUSAGE_BOTH', - 'RUSAGE_SELF', - 'RUSAGE_CHILDREN', -) - -# Setup our configure -class ResourceConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h']) - rlim_t = SimpleType('rlim_t') -for key in _CONSTANTS: - setattr(ResourceConfigure, key, ConstantInteger(key)) -for key in _OPTIONAL_CONSTANTS: - setattr(ResourceConfigure, key, DefinedConstantInteger(key)) - -# Configure constants and types -config = configure(ResourceConfigure) -config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1 -optional_constants = [] -for key in _OPTIONAL_CONSTANTS: - if config[key] is not None: - optional_constants.append(key) - else: - del config[key] - -config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants) -dumpcache.dumpcache2('resource', config) diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -340,10 +340,6 @@ return PyPyJitPolicy(pypy_hooks) def get_entry_point(self, config): - from pypy.tool.lib_pypy import import_from_lib_pypy - rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild') - rebuild.try_rebuild() - space = make_objspace(config) # manually imports app_main.py diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py deleted file mode 100644 --- 
a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ /dev/null @@ -1,44 +0,0 @@ -import py -import sys, os -from rpython.tool.udir import udir - -dirpath = py.path.local(__file__).dirpath().dirpath().dirpath().dirpath() -dirpath = dirpath.join('lib_pypy').join('ctypes_config_cache') - - -def run(filename, outputname): - filepath = dirpath.join(filename) - tmpdir = udir.ensure('testcache-' + os.path.splitext(filename)[0], - dir=True) - tmpdir.join('dumpcache.py').write(dirpath.join('dumpcache.py').read()) - path = sys.path[:] - sys.modules.pop('dumpcache', None) - try: - sys.path.insert(0, str(tmpdir)) - execfile(str(filepath), {}) - finally: - sys.path[:] = path - sys.modules.pop('dumpcache', None) - # - outputpath = tmpdir.join(outputname) - assert outputpath.check(exists=1) - modname = os.path.splitext(outputname)[0] - try: - sys.path.insert(0, str(tmpdir)) - d = {} - execfile(str(outputpath), d) - finally: - sys.path[:] = path - return d - - -def test_resource(): - if sys.platform == 'win32': - py.test.skip('no resource module on this platform') - d = run('resource.ctc.py', '_resource_cache.py') - assert 'RLIM_NLIMITS' in d - -def test_locale(): - d = run('locale.ctc.py', '_locale_cache.py') - assert 'LC_ALL' in d - assert 'CHAR_MAX' in d diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -169,6 +169,7 @@ # Careful: to copy lib_pypy, copying just the hg-tracked files # would not be enough: there are also ctypes_config_cache/_*_cache.py. + # XXX ^^^ this is no longer true! 
shutil.copytree(str(basedir.join('lib-python').join(STDLIB_VER)), str(pypydir.join('lib-python').join(STDLIB_VER)), ignore=ignore_patterns('.svn', 'py', '*.pyc', '*~')) From pypy.commits at gmail.com Mon Apr 18 08:13:41 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 18 Apr 2016 05:13:41 -0700 (PDT) Subject: [pypy-commit] pypy default: List _resource_cffi here Message-ID: <5714cf75.06d8c20a.38efb.ffffa08c@mx.google.com> Author: Armin Rigo Branch: Changeset: r83739:17f0d59398e4 Date: 2016-04-18 14:13 +0200 http://bitbucket.org/pypy/pypy/changeset/17f0d59398e4/ Log: List _resource_cffi here diff --git a/pypy/tool/build_cffi_imports.py b/pypy/tool/build_cffi_imports.py --- a/pypy/tool/build_cffi_imports.py +++ b/pypy/tool/build_cffi_imports.py @@ -13,6 +13,7 @@ "syslog": "_syslog_build.py" if sys.platform != "win32" else None, "gdbm": "_gdbm_build.py" if sys.platform != "win32" else None, "pwdgrp": "_pwdgrp_build.py" if sys.platform != "win32" else None, + "resource": "_resource_build.py" if sys.platform != "win32" else None, "xx": None, # for testing: 'None' should be completely ignored } @@ -61,8 +62,8 @@ print >> sys.stderr, "There should be no failures here" failures = create_cffi_import_libraries(exename, options, basedir) if len(failures) > 0: - print 'failed to build', [f[1] for f in failures] - assert False + print '*** failed to build', [f[1] for f in failures] + sys.exit(1) # monkey patch a failure, just to test print >> sys.stderr, 'This line should be followed by a traceback' From pypy.commits at gmail.com Mon Apr 18 08:33:48 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Apr 2016 05:33:48 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: renamed jitdriver_sd property only in one place Message-ID: <5714d42c.a2f2c20a.320cd.ffffbcc7@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83740:6f86f7af28ab Date: 2016-04-18 14:32 +0200 http://bitbucket.org/pypy/pypy/changeset/6f86f7af28ab/ Log: renamed jitdriver_sd 
property only in one place diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -568,7 +568,7 @@ # annhelper = MixLevelHelperAnnotator(self.translator.rtyper) for jd in self.jitdrivers_sd: - jd._get_printable_location_ptr = self._make_hook_graph(jd, + jd._printable_loc_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.get_printable_location, s_Str) jd._get_unique_id_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.get_unique_id, annmodel.SomeInteger()) From pypy.commits at gmail.com Mon Apr 18 08:37:48 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Apr 2016 05:37:48 -0700 (PDT) Subject: [pypy-commit] pypy default: use TempInt instead of TempVar, the latter has not type property Message-ID: <5714d51c.4c981c0a.b9bcf.72d3@mx.google.com> Author: Richard Plangger Branch: Changeset: r83741:dd16545e1bfd Date: 2016-04-18 14:37 +0200 http://bitbucket.org/pypy/pypy/changeset/dd16545e1bfd/ Log: use TempInt instead of TempVar, the latter has not type property diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -182,9 +182,9 @@ """ self._check_type(origvar) prev_loc = self.loc(origvar, must_exist=must_exist) - var2 = TempVar() + var2 = TempInt() if bindvar is None: - bindvar = TempVar() + bindvar = TempInt() if bind_first: loc, loc2 = self.force_allocate_reg_pair(bindvar, var2, self.temp_boxes) else: From pypy.commits at gmail.com Mon Apr 18 09:01:10 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 18 Apr 2016 06:01:10 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: Failing test Message-ID: <5714da96.10981c0a.dd5fc.128d@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r83742:b46cd34744c1 Date: 2016-04-18 15:00 +0200 http://bitbucket.org/pypy/pypy/changeset/b46cd34744c1/ Log: 
Failing test diff --git a/pypy/module/cpyext/test/test_iterator.py b/pypy/module/cpyext/test/test_iterator.py --- a/pypy/module/cpyext/test/test_iterator.py +++ b/pypy/module/cpyext/test/test_iterator.py @@ -1,4 +1,5 @@ from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase class TestIterator(BaseApiTest): @@ -20,3 +21,42 @@ assert api.PyIter_Next(space.w_None) is None assert api.PyErr_Occurred() is space.w_TypeError api.PyErr_Clear() + + +class AppTestIterator(AppTestCpythonExtensionBase): + def test_noniterable_object_with_mapping_interface(self): + module = self.import_extension('foo', [ + ("test", "METH_NOARGS", + ''' + PyObject *obj; + Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT; + Foo_Type.tp_as_mapping = &tp_as_mapping; + tp_as_mapping.mp_length = mp_length; + tp_as_mapping.mp_subscript = mp_subscript; + if (PyType_Ready(&Foo_Type) < 0) return NULL; + obj = PyObject_New(PyObject, &Foo_Type); + return obj; + ''' + )], + ''' + static PyObject * + mp_subscript(PyObject *self, PyObject *key) + { + return PyInt_FromLong(42); + } + static Py_ssize_t + mp_length(PyObject *self) + { + return 2; + } + PyMappingMethods tp_as_mapping; + static PyTypeObject Foo_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "foo.foo", + }; + ''') + obj = module.test() + assert obj["hi there"] == 42 + assert len(obj) == 2 + e = raises(TypeError, iter, obj) + assert str(e.value).endswith("object is not iterable") From pypy.commits at gmail.com Mon Apr 18 10:30:05 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 18 Apr 2016 07:30:05 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: Add another test in cpyext, and fix, including to Message-ID: <5714ef6d.8673c20a.c9221.ffffebfc@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r83743:6940834b346d Date: 2016-04-18 16:29 +0200 http://bitbucket.org/pypy/pypy/changeset/6940834b346d/ Log: Add another test in cpyext, and fix, including to 
operator.is{Sequence,Mapping}Type() diff --git a/pypy/module/cpyext/test/test_iterator.py b/pypy/module/cpyext/test/test_iterator.py --- a/pypy/module/cpyext/test/test_iterator.py +++ b/pypy/module/cpyext/test/test_iterator.py @@ -58,5 +58,53 @@ obj = module.test() assert obj["hi there"] == 42 assert len(obj) == 2 + assert not hasattr(obj, "__iter__") e = raises(TypeError, iter, obj) assert str(e.value).endswith("object is not iterable") + # + import operator + assert not operator.isSequenceType(obj) + assert operator.isMappingType(obj) + + def test_iterable_nonmapping_object(self): + module = self.import_extension('foo', [ + ("test", "METH_NOARGS", + ''' + PyObject *obj; + Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT; + Foo_Type.tp_as_sequence = &tp_as_sequence; + tp_as_sequence.sq_length = sq_length; + tp_as_sequence.sq_item = sq_item; + if (PyType_Ready(&Foo_Type) < 0) return NULL; + obj = PyObject_New(PyObject, &Foo_Type); + return obj; + ''' + )], + ''' + static PyObject * + sq_item(PyObject *self, Py_ssize_t size) + { + return PyInt_FromLong(42); + } + static Py_ssize_t + sq_length(PyObject *self) + { + return 2; + } + PySequenceMethods tp_as_sequence; + static PyTypeObject Foo_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "foo.foo", + }; + ''') + obj = module.test() + assert obj[1] == 42 + assert len(obj) == 2 + assert not hasattr(obj, "__iter__") + it = iter(obj) + assert it.next() == 42 + assert it.next() == 42 + # + import operator + assert operator.isSequenceType(obj) + assert not operator.isMappingType(obj) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -385,6 +385,12 @@ if not space.is_true(space.issubtype(self, space.w_type)): self.flag_cpytype = True self.flag_heaptype = False + # if a sequence or a mapping, then set the flag to force it + if pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_item: + self.flag_map_or_seq = 'S' + elif 
(pto.c_tp_as_mapping and pto.c_tp_as_mapping.c_mp_subscript and + not (pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_slice)): + self.flag_map_or_seq = 'M' if pto.c_tp_doc: self.w_doc = space.wrap(rffi.charp2str(pto.c_tp_doc)) diff --git a/pypy/module/operator/__init__.py b/pypy/module/operator/__init__.py --- a/pypy/module/operator/__init__.py +++ b/pypy/module/operator/__init__.py @@ -18,7 +18,7 @@ app_names = ['__delslice__', '__getslice__', '__repeat__', '__setslice__', 'countOf', 'delslice', 'getslice', 'indexOf', - 'isMappingType', 'isNumberType', 'isSequenceType', + 'isNumberType', 'repeat', 'setslice', 'attrgetter', 'itemgetter', 'methodcaller', ] @@ -36,7 +36,8 @@ 'sub', 'truediv', 'truth', 'xor', 'iadd', 'iand', 'iconcat', 'idiv', 'ifloordiv', 'ilshift', 'imod', 'imul', 'ior', 'ipow', 'irepeat', - 'irshift', 'isub', 'itruediv', 'ixor', '_length_hint'] + 'irshift', 'isub', 'itruediv', 'ixor', '_length_hint', + 'isSequenceType', 'isMappingType'] interpleveldefs = { '_compare_digest': 'tscmp.compare_digest', diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -6,6 +6,7 @@ ''' import types +import __pypy__ def countOf(a,b): @@ -39,27 +40,18 @@ index += 1 raise ValueError('sequence.index(x): x not in sequence') -def isMappingType(obj,): - 'isMappingType(a) -- Return True if a has a mapping type, False otherwise.' - if isinstance(obj, types.InstanceType): - return hasattr(obj, '__getitem__') - return hasattr(obj, '__getitem__') and not hasattr(obj, '__getslice__') - def isNumberType(obj,): 'isNumberType(a) -- Return True if a has a numeric type, False otherwise.' - return hasattr(obj, '__int__') or hasattr(obj, '__float__') - -def isSequenceType(obj,): - 'isSequenceType(a) -- Return True if a has a sequence type, False otherwise.' 
- if isinstance(obj, dict): - return False - return hasattr(obj, '__getitem__') + return (__pypy__.lookup_special(obj, '__int__') is not None or + __pypy__.lookup_special(obj, '__float__') is not None) def repeat(obj, num): 'repeat(a, b) -- Return a * b, where a is a sequence, and b is an integer.' + import operator + if not isinstance(num, (int, long)): raise TypeError('an integer is required') - if not isSequenceType(obj): + if not operator.isSequenceType(obj): raise TypeError("non-sequence object can't be repeated") return obj * num diff --git a/pypy/module/operator/interp_operator.py b/pypy/module/operator/interp_operator.py --- a/pypy/module/operator/interp_operator.py +++ b/pypy/module/operator/interp_operator.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec +from pypy.module.__builtin__.interp_classobj import W_InstanceObject def index(space, w_a): @@ -247,3 +248,33 @@ @unwrap_spec(default=int) def _length_hint(space, w_iterable, default): return space.wrap(space.length_hint(w_iterable, default)) + + +def isMappingType(space, w_obj): + 'isMappingType(a) -- Return True if a has a mapping type, False otherwise.' + if space.is_oldstyle_instance(w_obj): + result = (space.findattr(w_obj, space.wrap('__getitem__')) is not None) + else: + flag = space.type(w_obj).flag_map_or_seq + if flag == 'M': + result = True + elif flag == 'S': + result = False + else: + result = (space.lookup(w_obj, '__getitem__') is not None and + space.lookup(w_obj, '__getslice__') is None) + return space.wrap(result) + +def isSequenceType(space, w_obj): + 'isSequenceType(a) -- Return True if a has a sequence type, False otherwise.' 
+ if space.is_oldstyle_instance(w_obj): + result = (space.findattr(w_obj, space.wrap('__getitem__')) is not None) + else: + flag = space.type(w_obj).flag_map_or_seq + if flag == 'M': + result = False + elif flag == 'S': + result = True + else: + result = (space.lookup(w_obj, '__getitem__') is not None) + return space.wrap(result) diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -184,6 +184,19 @@ class Dict(dict): pass assert not operator.isSequenceType(Dict()) + def test_isXxxType_more(self): + import operator + + assert not operator.isSequenceType(list) + assert not operator.isSequenceType(dict) + assert not operator.isSequenceType({}) + assert not operator.isMappingType(list) + assert not operator.isMappingType(dict) + assert not operator.isMappingType([]) + assert not operator.isMappingType(()) + assert not operator.isNumberType(int) + assert not operator.isNumberType(float) + def test_inplace(self): import operator diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -273,7 +273,8 @@ def iter(space, w_obj): w_descr = space.lookup(w_obj, '__iter__') if w_descr is None: - w_descr = space.lookup(w_obj, '__getitem__') + if space.type(w_obj).flag_map_or_seq != 'M': + w_descr = space.lookup(w_obj, '__getitem__') if w_descr is None: raise oefmt(space.w_TypeError, "'%T' object is not iterable", w_obj) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -90,6 +90,7 @@ self.builtin_types[typedef.name] = w_type setattr(self, 'w_' + typedef.name, w_type) self._interplevel_classes[w_type] = cls + self.w_dict.flag_map_or_seq = 'M' self.builtin_types["NotImplemented"] = self.w_NotImplemented self.builtin_types["Ellipsis"] = 
self.w_Ellipsis self.w_basestring = self.builtin_types['basestring'] = \ diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -126,6 +126,7 @@ "flag_cpytype", "flag_abstract?", "flag_sequence_bug_compat", + "flag_map_or_seq", # '?' or 'M' or 'S' 'needsdel', 'weakrefable', 'hasdict', @@ -162,6 +163,7 @@ w_self.flag_cpytype = False w_self.flag_abstract = False w_self.flag_sequence_bug_compat = False + w_self.flag_map_or_seq = '?' # '?' means "don't know, check otherwise" if overridetypedef is not None: assert not force_new_layout @@ -1096,6 +1098,8 @@ continue w_self.flag_cpytype |= w_base.flag_cpytype w_self.flag_abstract |= w_base.flag_abstract + if w_self.flag_map_or_seq == '?': + w_self.flag_map_or_seq = w_base.flag_map_or_seq hasoldstylebase = copy_flags_from_bases(w_self, w_bestbase) layout = create_all_slots(w_self, hasoldstylebase, w_bestbase, From pypy.commits at gmail.com Mon Apr 18 10:41:02 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 18 Apr 2016 07:41:02 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: Found out that we have space.issequence_w(): move the logic from the Message-ID: <5714f1fe.0308c20a.8fe86.fffff1fa@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r83744:3decb320c37e Date: 2016-04-18 16:40 +0200 http://bitbucket.org/pypy/pypy/changeset/3decb320c37e/ Log: Found out that we have space.issequence_w(): move the logic from the operator module to there. Add space.ismapping_w() and call it from PyMapping_Check(). Add tests in cpyext. 
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1173,7 +1173,27 @@ return self.w_False def issequence_w(self, w_obj): - return (self.findattr(w_obj, self.wrap("__getitem__")) is not None) + if self.is_oldstyle_instance(w_obj): + return (self.findattr(w_obj, self.wrap('__getitem__')) is not None) + flag = self.type(w_obj).flag_map_or_seq + if flag == 'M': + return False + elif flag == 'S': + return True + else: + return (self.lookup(w_obj, '__getitem__') is not None) + + def ismapping_w(self, w_obj): + if self.is_oldstyle_instance(w_obj): + return (self.findattr(w_obj, self.wrap('__getitem__')) is not None) + flag = self.type(w_obj).flag_map_or_seq + if flag == 'M': + return True + elif flag == 'S': + return False + else: + return (self.lookup(w_obj, '__getitem__') is not None and + self.lookup(w_obj, '__getslice__') is None) # The code below only works # for the simple case (new-style instance). diff --git a/pypy/module/cpyext/mapping.py b/pypy/module/cpyext/mapping.py --- a/pypy/module/cpyext/mapping.py +++ b/pypy/module/cpyext/mapping.py @@ -8,7 +8,7 @@ def PyMapping_Check(space, w_obj): """Return 1 if the object provides mapping protocol, and 0 otherwise. 
This function always succeeds.""" - return int(space.findattr(w_obj, space.wrap("items")) is not None) + return int(space.ismapping_w(w_obj)) @cpython_api([PyObject], Py_ssize_t, error=-1) def PyMapping_Size(space, w_obj): diff --git a/pypy/module/cpyext/test/test_iterator.py b/pypy/module/cpyext/test/test_iterator.py --- a/pypy/module/cpyext/test/test_iterator.py +++ b/pypy/module/cpyext/test/test_iterator.py @@ -37,7 +37,14 @@ obj = PyObject_New(PyObject, &Foo_Type); return obj; ''' - )], + ), + ("check", "METH_O", + ''' + return PyInt_FromLong( + PySequence_Check(args) + + PyMapping_Check(args) * 2); + ''') + ], ''' static PyObject * mp_subscript(PyObject *self, PyObject *key) @@ -65,6 +72,8 @@ import operator assert not operator.isSequenceType(obj) assert operator.isMappingType(obj) + # + assert module.check(obj) == 2 def test_iterable_nonmapping_object(self): module = self.import_extension('foo', [ @@ -78,8 +87,14 @@ if (PyType_Ready(&Foo_Type) < 0) return NULL; obj = PyObject_New(PyObject, &Foo_Type); return obj; + '''), + ("check", "METH_O", ''' - )], + return PyInt_FromLong( + PySequence_Check(args) + + PyMapping_Check(args) * 2); + ''') + ], ''' static PyObject * sq_item(PyObject *self, Py_ssize_t size) @@ -108,3 +123,5 @@ import operator assert operator.isSequenceType(obj) assert not operator.isMappingType(obj) + # + assert module.check(obj) == 1 diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -6,6 +6,10 @@ import py.test class TestSequence(BaseApiTest): + def test_check(self, space, api): + assert api.PySequence_Check(space.newlist([])) + assert not api.PySequence_Check(space.newdict()) + def test_sequence(self, space, api): w_l = space.wrap([1, 2, 3, 4]) assert api.PySequence_Fast(w_l, "message") is w_l diff --git a/pypy/module/operator/interp_operator.py b/pypy/module/operator/interp_operator.py --- 
a/pypy/module/operator/interp_operator.py +++ b/pypy/module/operator/interp_operator.py @@ -252,29 +252,8 @@ def isMappingType(space, w_obj): 'isMappingType(a) -- Return True if a has a mapping type, False otherwise.' - if space.is_oldstyle_instance(w_obj): - result = (space.findattr(w_obj, space.wrap('__getitem__')) is not None) - else: - flag = space.type(w_obj).flag_map_or_seq - if flag == 'M': - result = True - elif flag == 'S': - result = False - else: - result = (space.lookup(w_obj, '__getitem__') is not None and - space.lookup(w_obj, '__getslice__') is None) - return space.wrap(result) + return space.wrap(space.ismapping_w(w_obj)) def isSequenceType(space, w_obj): 'isSequenceType(a) -- Return True if a has a sequence type, False otherwise.' - if space.is_oldstyle_instance(w_obj): - result = (space.findattr(w_obj, space.wrap('__getitem__')) is not None) - else: - flag = space.type(w_obj).flag_map_or_seq - if flag == 'M': - result = False - elif flag == 'S': - result = True - else: - result = (space.lookup(w_obj, '__getitem__') is not None) - return space.wrap(result) + return space.wrap(space.issequence_w(w_obj)) From pypy.commits at gmail.com Mon Apr 18 12:09:17 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 18 Apr 2016 09:09:17 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: DRY: path unwrappers Message-ID: <571506ad.c711c30a.86149.089d@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83745:a74d298f64b7 Date: 2016-04-18 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/a74d298f64b7/ Log: DRY: path unwrappers diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -132,38 +132,33 @@ self.as_unicode = unicode self.w_path = w_path + at specialize.arg(2) +def _unwrap_path(space, w_value, allow_fd=True): + if space.is_none(w_value): + raise oefmt(space.w_TypeError, + "can't specify None for path argument") + if 
_WIN32: + try: + path_u = space.unicode_w(w_value) + return Path(-1, None, path_u, w_value) + except OperationError: + pass + try: + path_b = space.fsencode_w(w_value) + return Path(-1, path_b, None, w_value) + except OperationError: + if allow_fd: + fd = unwrap_fd(space, w_value, "string, bytes or integer") + return Path(fd, None, None, w_value) + raise oefmt(space.w_TypeError, "illegal type for path parameter") + class _PathOrFd(Unwrapper): def unwrap(self, space, w_value): - if space.is_none(w_value): - raise oefmt(space.w_TypeError, - "can't specify None for path argument") - if _WIN32: - try: - path_u = space.unicode_w(w_value) - return Path(-1, None, path_u, w_value) - except OperationError: - pass - try: - path_b = space.fsencode_w(w_value) - return Path(-1, path_b, None, w_value) - except OperationError: - pass - fd = unwrap_fd(space, w_value, "string, bytes or integer") - return Path(fd, None, None, w_value) + return _unwrap_path(space, w_value, allow_fd=True) class _JustPath(Unwrapper): def unwrap(self, space, w_value): - if _WIN32: - try: - path_u = space.unicode_w(w_value) - return Path(-1, None, path_u, w_value) - except OperationError: - pass - try: - path_b = space.fsencode_w(w_value) - return Path(-1, path_b, None, w_value) - except OperationError: - raise oefmt(space.w_TypeError, "illegal type for path parameter") + return _unwrap_path(space, w_value, allow_fd=False) def path_or_fd(allow_fd=True): return _PathOrFd if allow_fd else _JustPath From pypy.commits at gmail.com Mon Apr 18 12:20:24 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 18 Apr 2016 09:20:24 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: merge default into branch Message-ID: <57150948.4412c30a.ba3b8.00bd@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83746:a734e99db063 Date: 2016-04-18 19:18 +0300 http://bitbucket.org/pypy/pypy/changeset/a734e99db063/ Log: merge default into branch diff too long, truncating to 2000 out of 10773 lines diff --git a/LICENSE 
b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -111,23 +111,24 @@ Simon Burton Martin Matusiak Konstantin Lopuhin + Stefano Rivera Wenzhu Man John Witulski Laurence Tratt Ivan Sichmann Freitas Greg Price Dario Bertini - Stefano Rivera Mark Pearse Simon Cross + Edd Barrett Andreas Stührk - Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Spenser Bauman Jeremy Thurgood Paweł Piotr Przeradowski - Spenser Bauman + Tobias Pape Paul deGrandis Ilya Osadchiy marky1991 @@ -139,7 +140,7 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Tobias Pape + Mark Young Wanja Saatkamp Gerald Klix Mike Blume @@ -170,9 +171,9 @@ Yichao Yu Rocco Moretti Gintautas Miliauskas + Devin Jeanpierre Michael Twomey Lucian Branescu Mihaila - Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -183,6 +184,7 @@ Victor Stinner Andrews Medina anatoly techtonik + Sergey Matyunin Stuart Williams Jasper Schulz Christian Hudon @@ -217,7 +219,6 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan - Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -225,7 +226,9 @@ Pieter Zieschang Gabriel Lukas Vacek + Kunal Grover Andrew Dalke + Florin Papa Sylvain Thenault Jakub Stasiak Nathan Taylor @@ -240,7 +243,6 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner - Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -252,9 +254,11 @@ Artur Lisiecki Sergey Kishchenko Ignas Mikalajunas + Alecsandru Patrascu Christoph Gerum Martin Blais Lene Wagner + Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su Toni Mattis @@ -291,6 +295,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann + florinpapa Rafał Gałczyński Matt Bogosian Christian Muirhead @@ -305,6 +310,7 @@ Boglarka Vezer Chris Pressey Buck Golemon + Diana Popa Konrad Delong Dinu Gherman Chris Lambacher diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -67,7 +67,8 @@ subvalue = subfield.ctype fields[subname] = 
Field(subname, relpos, subvalue._sizeofinstances(), - subvalue, i, is_bitfield) + subvalue, i, is_bitfield, + inside_anon_field=fields[name]) else: resnames.append(name) names = resnames @@ -77,13 +78,15 @@ class Field(object): - def __init__(self, name, offset, size, ctype, num, is_bitfield): + def __init__(self, name, offset, size, ctype, num, is_bitfield, + inside_anon_field=None): self.__dict__['name'] = name self.__dict__['offset'] = offset self.__dict__['size'] = size self.__dict__['ctype'] = ctype self.__dict__['num'] = num self.__dict__['is_bitfield'] = is_bitfield + self.__dict__['inside_anon_field'] = inside_anon_field def __setattr__(self, name, value): raise AttributeError(name) @@ -95,6 +98,8 @@ def __get__(self, obj, cls=None): if obj is None: return self + if self.inside_anon_field is not None: + return getattr(self.inside_anon_field.__get__(obj), self.name) if self.is_bitfield: # bitfield member, use direct access return obj._buffer.__getattr__(self.name) @@ -105,6 +110,9 @@ return fieldtype._CData_output(suba, obj, offset) def __set__(self, obj, value): + if self.inside_anon_field is not None: + setattr(self.inside_anon_field.__get__(obj), self.name, value) + return fieldtype = self.ctype cobj = fieldtype.from_param(value) key = keepalive_key(self.num) diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -1,51 +1,22 @@ -from resource import _struct_rusage, struct_rusage -from ctypes import CDLL, c_int, POINTER, byref -from ctypes.util import find_library +from resource import ffi, lib, _make_struct_rusage __all__ = ["wait3", "wait4"] -libc = CDLL(find_library("c")) -c_wait3 = libc.wait3 -c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] -c_wait3.restype = c_int - -c_wait4 = libc.wait4 -c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)] -c_wait4.restype = c_int - -def create_struct_rusage(c_struct): - return struct_rusage(( - 
float(c_struct.ru_utime), - float(c_struct.ru_stime), - c_struct.ru_maxrss, - c_struct.ru_ixrss, - c_struct.ru_idrss, - c_struct.ru_isrss, - c_struct.ru_minflt, - c_struct.ru_majflt, - c_struct.ru_nswap, - c_struct.ru_inblock, - c_struct.ru_oublock, - c_struct.ru_msgsnd, - c_struct.ru_msgrcv, - c_struct.ru_nsignals, - c_struct.ru_nvcsw, - c_struct.ru_nivcsw)) def wait3(options): - status = c_int() - _rusage = _struct_rusage() - pid = c_wait3(byref(status), c_int(options), byref(_rusage)) + status = ffi.new("int *") + ru = ffi.new("struct rusage *") + pid = lib.wait3(status, options, ru) - rusage = create_struct_rusage(_rusage) + rusage = _make_struct_rusage(ru) - return pid, status.value, rusage + return pid, status[0], rusage def wait4(pid, options): - status = c_int() - _rusage = _struct_rusage() - pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage)) + status = ffi.new("int *") + ru = ffi.new("struct rusage *") + pid = lib.wait4(pid, status, options, ru) - rusage = create_struct_rusage(_rusage) + rusage = _make_struct_rusage(ru) - return pid, status.value, rusage + return pid, status[0], rusage diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_resource_build.py @@ -0,0 +1,118 @@ +from cffi import FFI + +ffi = FFI() + +# Note: we don't directly expose 'struct timeval' or 'struct rlimit' + + +rlimit_consts = ''' +RLIMIT_CPU +RLIMIT_FSIZE +RLIMIT_DATA +RLIMIT_STACK +RLIMIT_CORE +RLIMIT_NOFILE +RLIMIT_OFILE +RLIMIT_VMEM +RLIMIT_AS +RLIMIT_RSS +RLIMIT_NPROC +RLIMIT_MEMLOCK +RLIMIT_SBSIZE +RLIM_INFINITY +RUSAGE_SELF +RUSAGE_CHILDREN +RUSAGE_BOTH +'''.split() + +rlimit_consts = ['#ifdef %s\n\t{"%s", %s},\n#endif\n' % (s, s, s) + for s in rlimit_consts] + + +ffi.set_source("_resource_cffi", """ +#include +#include +#include +#include + +static const struct my_rlimit_def { + const char *name; + long long value; +} my_rlimit_consts[] = { +$RLIMIT_CONSTS + { NULL, 0 } +}; + +#define 
doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001) + +static double my_utime(struct rusage *input) +{ + return doubletime(input->ru_utime); +} + +static double my_stime(struct rusage *input) +{ + return doubletime(input->ru_stime); +} + +static int my_getrlimit(int resource, long long result[2]) +{ + struct rlimit rl; + if (getrlimit(resource, &rl) == -1) + return -1; + result[0] = rl.rlim_cur; + result[1] = rl.rlim_max; + return 0; +} + +static int my_setrlimit(int resource, long long cur, long long max) +{ + struct rlimit rl; + rl.rlim_cur = cur & RLIM_INFINITY; + rl.rlim_max = max & RLIM_INFINITY; + return setrlimit(resource, &rl); +} + +""".replace('$RLIMIT_CONSTS', ''.join(rlimit_consts))) + + +ffi.cdef(""" + +#define RLIM_NLIMITS ... + +const struct my_rlimit_def { + const char *name; + long long value; +} my_rlimit_consts[]; + +struct rusage { + long ru_maxrss; + long ru_ixrss; + long ru_idrss; + long ru_isrss; + long ru_minflt; + long ru_majflt; + long ru_nswap; + long ru_inblock; + long ru_oublock; + long ru_msgsnd; + long ru_msgrcv; + long ru_nsignals; + long ru_nvcsw; + long ru_nivcsw; + ...; +}; + +static double my_utime(struct rusage *); +static double my_stime(struct rusage *); +void getrusage(int who, struct rusage *result); +int my_getrlimit(int resource, long long result[2]); +int my_setrlimit(int resource, long long cur, long long max); + +int wait3(int *status, int options, struct rusage *rusage); +int wait4(int pid, int *status, int options, struct rusage *rusage); +""") + + +if __name__ == "__main__": + ffi.compile() diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.5.2 +Version: 1.6.0 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.5.2" -__version_info__ = (1, 5, 2) +__version__ = "1.6.0" +__version_info__ = (1, 6, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.5.2" + "\ncompiled with cffi version: 1.6.0" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -299,6 +299,23 @@ """ return self._backend.string(cdata, maxlen) + def unpack(self, cdata, length): + """Unpack an array of C data of the given length, + returning a Python string/unicode/list. + + If 'cdata' is a pointer to 'char', returns a byte string. + It does not stop at the first null. This is equivalent to: + ffi.buffer(cdata, length)[:] + + If 'cdata' is a pointer to 'wchar_t', returns a unicode string. + 'length' is measured in wchar_t's; it is not the size in bytes. + + If 'cdata' is a pointer to anything else, returns a list of + 'length' items. This is a faster equivalent to: + [cdata[i] for i in range(length)] + """ + return self._backend.unpack(cdata, length) + def buffer(self, cdata, size=-1): """Return a read-write buffer object that references the raw C data pointed to by the given 'cdata'. 
The 'cdata' must be a pointer or @@ -721,6 +738,26 @@ raise ValueError("ffi.def_extern() is only available on API-mode FFI " "objects") + def list_types(self): + """Returns the user type names known to this FFI instance. + This returns a tuple containing three lists of names: + (typedef_names, names_of_structs, names_of_unions) + """ + typedefs = [] + structs = [] + unions = [] + for key in self._parser._declarations: + if key.startswith('typedef '): + typedefs.append(key[8:]) + elif key.startswith('struct '): + structs.append(key[7:]) + elif key.startswith('union '): + unions.append(key[6:]) + typedefs.sort() + structs.sort() + unions.sort() + return (typedefs, structs, unions) + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -29,7 +29,8 @@ _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") _r_cdecl = re.compile(r"\b__cdecl\b") -_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.') +_r_extern_python = re.compile(r'\bextern\s*"' + r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.') _r_star_const_space = re.compile( # matches "* const " r"[*]\s*((const|volatile|restrict)\b\s*)+") @@ -88,6 +89,12 @@ # void __cffi_extern_python_start; # int foo(int); # void __cffi_extern_python_stop; + # + # input: `extern "Python+C" int foo(int);` + # output: + # void __cffi_extern_python_plus_c_start; + # int foo(int); + # void __cffi_extern_python_stop; parts = [] while True: match = _r_extern_python.search(csource) @@ -98,7 +105,10 @@ #print ''.join(parts)+csource #print '=>' parts.append(csource[:match.start()]) - parts.append('void __cffi_extern_python_start; ') + if 'C' in match.group(1): + parts.append('void __cffi_extern_python_plus_c_start; ') + else: + parts.append('void __cffi_extern_python_start; ') if csource[endpos] == '{': # grouping variant closing = csource.find('}', 
endpos) @@ -302,7 +312,7 @@ break # try: - self._inside_extern_python = False + self._inside_extern_python = '__cffi_extern_python_stop' for decl in iterator: if isinstance(decl, pycparser.c_ast.Decl): self._parse_decl(decl) @@ -376,8 +386,10 @@ tp = self._get_type_pointer(tp, quals) if self._options.get('dllexport'): tag = 'dllexport_python ' - elif self._inside_extern_python: + elif self._inside_extern_python == '__cffi_extern_python_start': tag = 'extern_python ' + elif self._inside_extern_python == '__cffi_extern_python_plus_c_start': + tag = 'extern_python_plus_c ' else: tag = 'function ' self._declare(tag + decl.name, tp) @@ -421,11 +433,9 @@ # hack: `extern "Python"` in the C source is replaced # with "void __cffi_extern_python_start;" and # "void __cffi_extern_python_stop;" - self._inside_extern_python = not self._inside_extern_python - assert self._inside_extern_python == ( - decl.name == '__cffi_extern_python_start') + self._inside_extern_python = decl.name else: - if self._inside_extern_python: + if self._inside_extern_python !='__cffi_extern_python_stop': raise api.CDefError( "cannot declare constants or " "variables with 'extern \"Python\"'") diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -1145,11 +1145,11 @@ def _generate_cpy_extern_python_collecttype(self, tp, name): assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) + _generate_cpy_dllexport_python_collecttype = \ + _generate_cpy_extern_python_plus_c_collecttype = \ + _generate_cpy_extern_python_collecttype - def _generate_cpy_dllexport_python_collecttype(self, tp, name): - self._generate_cpy_extern_python_collecttype(tp, name) - - def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): + def _extern_python_decl(self, tp, name, tag_and_space): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' @@ -1184,11 +1184,7 @@ size_of_a = 'sizeof(%s) > 
%d ? sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) - if dllexport: - tag = 'CFFI_DLLEXPORT' - else: - tag = 'static' - prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) + prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') @@ -1206,8 +1202,14 @@ prnt() self._num_externpy += 1 + def _generate_cpy_extern_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'static ') + def _generate_cpy_dllexport_python_decl(self, tp, name): - self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ') + + def _generate_cpy_extern_python_plus_c_decl(self, tp, name): + self._extern_python_decl(tp, name, '') def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: @@ -1220,8 +1222,9 @@ self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) - def _generate_cpy_dllexport_python_ctx(self, tp, name): - self._generate_cpy_extern_python_ctx(tp, name) + _generate_cpy_dllexport_python_ctx = \ + _generate_cpy_extern_python_plus_c_ctx = \ + _generate_cpy_extern_python_ctx def _string_literal(self, s): def _char_repr(c): @@ -1231,7 +1234,7 @@ if c == '\n': return '\\n' return '\\%03o' % ord(c) lines = [] - for line in s.splitlines(True): + for line in s.splitlines(True) or ['']: lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) return ' \\\n'.join(lines) @@ -1319,7 +1322,9 @@ s = s.encode('ascii') super(NativeIO, self).write(s) -def _make_c_or_py_source(ffi, module_name, preamble, target_file): +def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): + if verbose: + print("generating %s" % (target_file,)) recompiler = Recompiler(ffi, module_name, target_is_python=(preamble is None)) recompiler.collect_type_table() @@ -1331,6 +1336,8 @@ with open(target_file, 'r') as f1: if 
f1.read(len(output) + 1) != output: raise IOError + if verbose: + print("(already up-to-date)") return False # already up-to-date except IOError: tmp_file = '%s.~%d' % (target_file, os.getpid()) @@ -1343,12 +1350,14 @@ os.rename(tmp_file, target_file) return True -def make_c_source(ffi, module_name, preamble, target_c_file): +def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): assert preamble is not None - return _make_c_or_py_source(ffi, module_name, preamble, target_c_file) + return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, + verbose) -def make_py_source(ffi, module_name, target_py_file): - return _make_c_or_py_source(ffi, module_name, None, target_py_file) +def make_py_source(ffi, module_name, target_py_file, verbose=False): + return _make_c_or_py_source(ffi, module_name, None, target_py_file, + verbose) def _modname_to_file(outputdir, modname, extension): parts = modname.split('.') @@ -1438,7 +1447,8 @@ target = '*' # ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) - updated = make_c_source(ffi, module_name, preamble, c_file) + updated = make_c_source(ffi, module_name, preamble, c_file, + verbose=compiler_verbose) if call_c_compiler: patchlist = [] cwd = os.getcwd() @@ -1458,7 +1468,8 @@ else: if c_file is None: c_file, _ = _modname_to_file(tmpdir, module_name, '.py') - updated = make_py_source(ffi, module_name, c_file) + updated = make_py_source(ffi, module_name, c_file, + verbose=compiler_verbose) if call_c_compiler: return c_file else: @@ -1484,4 +1495,7 @@ def typeof_disabled(*args, **kwds): raise NotImplementedError ffi._typeof = typeof_disabled + for name in dir(ffi): + if not name.startswith('_') and not hasattr(module.ffi, name): + setattr(ffi, name, NotImplemented) return module.lib diff --git a/lib_pypy/ctypes_config_cache/__init__.py b/lib_pypy/ctypes_config_cache/__init__.py deleted file mode 100644 diff --git a/lib_pypy/ctypes_config_cache/dumpcache.py 
b/lib_pypy/ctypes_config_cache/dumpcache.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/dumpcache.py +++ /dev/null @@ -1,21 +0,0 @@ -import sys, os -from ctypes_configure import dumpcache - -def dumpcache2(basename, config): - size = 32 if sys.maxint <= 2**32 else 64 - filename = '_%s_%s_.py' % (basename, size) - dumpcache.dumpcache(__file__, filename, config) - # - filename = os.path.join(os.path.dirname(__file__), - '_%s_cache.py' % (basename,)) - g = open(filename, 'w') - print >> g, '''\ -import sys -_size = 32 if sys.maxint <= 2**32 else 64 -# XXX relative import, should be removed together with -# XXX the relative imports done e.g. by lib_pypy/pypy_test/test_hashlib -_mod = __import__("_%s_%%s_" %% (_size,), - globals(), locals(), ["*"]) -globals().update(_mod.__dict__)\ -''' % (basename,) - g.close() diff --git a/lib_pypy/ctypes_config_cache/locale.ctc.py b/lib_pypy/ctypes_config_cache/locale.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/locale.ctc.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -'ctypes_configure' source for _locale.py. -Run this to rebuild _locale_cache.py. 
-""" - -from ctypes_configure.configure import (configure, ExternalCompilationInfo, - ConstantInteger, DefinedConstantInteger, SimpleType, check_eci) -import dumpcache - -# ____________________________________________________________ - -_CONSTANTS = [ - 'LC_CTYPE', - 'LC_TIME', - 'LC_COLLATE', - 'LC_MONETARY', - 'LC_MESSAGES', - 'LC_NUMERIC', - 'LC_ALL', - 'CHAR_MAX', -] - -class LocaleConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['limits.h', - 'locale.h']) -for key in _CONSTANTS: - setattr(LocaleConfigure, key, DefinedConstantInteger(key)) - -config = configure(LocaleConfigure, noerr=True) -for key, value in config.items(): - if value is None: - del config[key] - _CONSTANTS.remove(key) - -# ____________________________________________________________ - -eci = ExternalCompilationInfo(includes=['locale.h', 'langinfo.h']) -HAS_LANGINFO = check_eci(eci) - -if HAS_LANGINFO: - # list of all possible names - langinfo_names = [ - "RADIXCHAR", "THOUSEP", "CRNCYSTR", - "D_T_FMT", "D_FMT", "T_FMT", "AM_STR", "PM_STR", - "CODESET", "T_FMT_AMPM", "ERA", "ERA_D_FMT", "ERA_D_T_FMT", - "ERA_T_FMT", "ALT_DIGITS", "YESEXPR", "NOEXPR", "_DATE_FMT", - ] - for i in range(1, 8): - langinfo_names.append("DAY_%d" % i) - langinfo_names.append("ABDAY_%d" % i) - for i in range(1, 13): - langinfo_names.append("MON_%d" % i) - langinfo_names.append("ABMON_%d" % i) - - class LanginfoConfigure: - _compilation_info_ = eci - nl_item = SimpleType('nl_item') - for key in langinfo_names: - setattr(LanginfoConfigure, key, DefinedConstantInteger(key)) - - langinfo_config = configure(LanginfoConfigure) - for key, value in langinfo_config.items(): - if value is None: - del langinfo_config[key] - langinfo_names.remove(key) - config.update(langinfo_config) - _CONSTANTS += langinfo_names - -# ____________________________________________________________ - -config['ALL_CONSTANTS'] = tuple(_CONSTANTS) -config['HAS_LANGINFO'] = HAS_LANGINFO -dumpcache.dumpcache2('locale', config) diff 
--git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py deleted file mode 100755 --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ /dev/null @@ -1,56 +0,0 @@ -#! /usr/bin/env python -# Run this script to rebuild all caches from the *.ctc.py files. - -import os, sys - -sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))) - -import py - -_dirpath = os.path.dirname(__file__) or os.curdir - -from rpython.tool.ansi_print import AnsiLogger -log = AnsiLogger("ctypes_config_cache") - - -def rebuild_one(name): - filename = os.path.join(_dirpath, name) - d = {'__file__': filename} - path = sys.path[:] - try: - sys.path.insert(0, _dirpath) - execfile(filename, d) - finally: - sys.path[:] = path - -def try_rebuild(): - size = 32 if sys.maxint <= 2**32 else 64 - # remove the files '_*_size_.py' - left = {} - for p in os.listdir(_dirpath): - if p.startswith('_') and (p.endswith('_%s_.py' % size) or - p.endswith('_%s_.pyc' % size)): - os.unlink(os.path.join(_dirpath, p)) - elif p.startswith('_') and (p.endswith('_.py') or - p.endswith('_.pyc')): - for i in range(2, len(p)-4): - left[p[:i]] = True - # remove the files '_*_cache.py' if there is no '_*_*_.py' left around - for p in os.listdir(_dirpath): - if p.startswith('_') and (p.endswith('_cache.py') or - p.endswith('_cache.pyc')): - if p[:-9] not in left: - os.unlink(os.path.join(_dirpath, p)) - # - for p in os.listdir(_dirpath): - if p.endswith('.ctc.py'): - try: - rebuild_one(p) - except Exception, e: - log.ERROR("Running %s:\n %s: %s" % ( - os.path.join(_dirpath, p), - e.__class__.__name__, e)) - - -if __name__ == '__main__': - try_rebuild() diff --git a/lib_pypy/ctypes_config_cache/resource.ctc.py b/lib_pypy/ctypes_config_cache/resource.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/resource.ctc.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -'ctypes_configure' source for resource.py. -Run this to rebuild _resource_cache.py. 
-""" - - -from ctypes import sizeof -import dumpcache -from ctypes_configure.configure import (configure, - ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger, - SimpleType) - - -_CONSTANTS = ( - 'RLIM_INFINITY', - 'RLIM_NLIMITS', -) -_OPTIONAL_CONSTANTS = ( - 'RLIMIT_CPU', - 'RLIMIT_FSIZE', - 'RLIMIT_DATA', - 'RLIMIT_STACK', - 'RLIMIT_CORE', - 'RLIMIT_RSS', - 'RLIMIT_NPROC', - 'RLIMIT_NOFILE', - 'RLIMIT_OFILE', - 'RLIMIT_MEMLOCK', - 'RLIMIT_AS', - 'RLIMIT_LOCKS', - 'RLIMIT_SIGPENDING', - 'RLIMIT_MSGQUEUE', - 'RLIMIT_NICE', - 'RLIMIT_RTPRIO', - 'RLIMIT_VMEM', - - 'RUSAGE_BOTH', - 'RUSAGE_SELF', - 'RUSAGE_CHILDREN', -) - -# Setup our configure -class ResourceConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h']) - rlim_t = SimpleType('rlim_t') -for key in _CONSTANTS: - setattr(ResourceConfigure, key, ConstantInteger(key)) -for key in _OPTIONAL_CONSTANTS: - setattr(ResourceConfigure, key, DefinedConstantInteger(key)) - -# Configure constants and types -config = configure(ResourceConfigure) -config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1 -optional_constants = [] -for key in _OPTIONAL_CONSTANTS: - if config[key] is not None: - optional_constants.append(key) - else: - del config[key] - -config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants) -dumpcache.dumpcache2('resource', config) diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py --- a/lib_pypy/pwd.py +++ b/lib_pypy/pwd.py @@ -1,4 +1,4 @@ -# ctypes implementation: Victor Stinner, 2008-05-08 +# indirectly based on ctypes implementation: Victor Stinner, 2008-05-08 """ This module provides access to the Unix password database. It is available on all Unix versions. 
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py --- a/lib_pypy/resource.py +++ b/lib_pypy/resource.py @@ -1,15 +1,8 @@ -import sys -if sys.platform == 'win32': - raise ImportError('resource module not available for win32') +"""http://docs.python.org/library/resource""" -# load the platform-specific cache made by running resource.ctc.py -from ctypes_config_cache._resource_cache import * - -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno -from ctypes import Structure, c_int, c_long, byref, POINTER +from _resource_cffi import ffi, lib from errno import EINVAL, EPERM -import _structseq +import _structseq, os try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f @@ -18,106 +11,37 @@ class error(Exception): pass +class struct_rusage: + """struct_rusage: Result from getrusage. -# Read required libc functions -_getrusage = libc.getrusage -_getrlimit = libc.getrlimit -_setrlimit = libc.setrlimit -try: - _getpagesize = libc.getpagesize - _getpagesize.argtypes = () - _getpagesize.restype = c_int -except AttributeError: - from os import sysconf - _getpagesize = None +This object may be accessed either as a tuple of + (utime,stime,maxrss,ixrss,idrss,isrss,minflt,majflt, + nswap,inblock,oublock,msgsnd,msgrcv,nsignals,nvcsw,nivcsw) +or via the attributes ru_utime, ru_stime, ru_maxrss, and so on.""" - -class timeval(Structure): - _fields_ = ( - ("tv_sec", c_long), - ("tv_usec", c_long), - ) - def __str__(self): - return "(%s, %s)" % (self.tv_sec, self.tv_usec) - - def __float__(self): - return self.tv_sec + self.tv_usec/1000000.0 - -class _struct_rusage(Structure): - _fields_ = ( - ("ru_utime", timeval), - ("ru_stime", timeval), - ("ru_maxrss", c_long), - ("ru_ixrss", c_long), - ("ru_idrss", c_long), - ("ru_isrss", c_long), - ("ru_minflt", c_long), - ("ru_majflt", c_long), - ("ru_nswap", c_long), - ("ru_inblock", c_long), - ("ru_oublock", c_long), - ("ru_msgsnd", c_long), - ("ru_msgrcv", c_long), - 
("ru_nsignals", c_long), - ("ru_nvcsw", c_long), - ("ru_nivcsw", c_long), - ) - -_getrusage.argtypes = (c_int, POINTER(_struct_rusage)) -_getrusage.restype = c_int - - -class struct_rusage: __metaclass__ = _structseq.structseqtype - ru_utime = _structseq.structseqfield(0) - ru_stime = _structseq.structseqfield(1) - ru_maxrss = _structseq.structseqfield(2) - ru_ixrss = _structseq.structseqfield(3) - ru_idrss = _structseq.structseqfield(4) - ru_isrss = _structseq.structseqfield(5) - ru_minflt = _structseq.structseqfield(6) - ru_majflt = _structseq.structseqfield(7) - ru_nswap = _structseq.structseqfield(8) - ru_inblock = _structseq.structseqfield(9) - ru_oublock = _structseq.structseqfield(10) - ru_msgsnd = _structseq.structseqfield(11) - ru_msgrcv = _structseq.structseqfield(12) - ru_nsignals = _structseq.structseqfield(13) - ru_nvcsw = _structseq.structseqfield(14) - ru_nivcsw = _structseq.structseqfield(15) + ru_utime = _structseq.structseqfield(0, "user time used") + ru_stime = _structseq.structseqfield(1, "system time used") + ru_maxrss = _structseq.structseqfield(2, "max. 
resident set size") + ru_ixrss = _structseq.structseqfield(3, "shared memory size") + ru_idrss = _structseq.structseqfield(4, "unshared data size") + ru_isrss = _structseq.structseqfield(5, "unshared stack size") + ru_minflt = _structseq.structseqfield(6, "page faults not requiring I/O") + ru_majflt = _structseq.structseqfield(7, "page faults requiring I/O") + ru_nswap = _structseq.structseqfield(8, "number of swap outs") + ru_inblock = _structseq.structseqfield(9, "block input operations") + ru_oublock = _structseq.structseqfield(10, "block output operations") + ru_msgsnd = _structseq.structseqfield(11, "IPC messages sent") + ru_msgrcv = _structseq.structseqfield(12, "IPC messages received") + ru_nsignals = _structseq.structseqfield(13,"signals received") + ru_nvcsw = _structseq.structseqfield(14, "voluntary context switches") + ru_nivcsw = _structseq.structseqfield(15, "involuntary context switches") - at builtinify -def rlimit_check_bounds(rlim_cur, rlim_max): - if rlim_cur > rlim_t_max: - raise ValueError("%d does not fit into rlim_t" % rlim_cur) - if rlim_max > rlim_t_max: - raise ValueError("%d does not fit into rlim_t" % rlim_max) - -class rlimit(Structure): - _fields_ = ( - ("rlim_cur", rlim_t), - ("rlim_max", rlim_t), - ) - -_getrlimit.argtypes = (c_int, POINTER(rlimit)) -_getrlimit.restype = c_int -_setrlimit.argtypes = (c_int, POINTER(rlimit)) -_setrlimit.restype = c_int - - - at builtinify -def getrusage(who): - ru = _struct_rusage() - ret = _getrusage(who, byref(ru)) - if ret == -1: - errno = get_errno() - if errno == EINVAL: - raise ValueError("invalid who parameter") - raise error(errno) +def _make_struct_rusage(ru): return struct_rusage(( - float(ru.ru_utime), - float(ru.ru_stime), + lib.my_utime(ru), + lib.my_stime(ru), ru.ru_maxrss, ru.ru_ixrss, ru.ru_idrss, @@ -135,48 +59,59 @@ )) @builtinify +def getrusage(who): + ru = ffi.new("struct rusage *") + if lib.getrusage(who, ru) == -1: + if ffi.errno == EINVAL: + raise ValueError("invalid who 
parameter") + raise error(ffi.errno) + return _make_struct_rusage(ru) + + at builtinify def getrlimit(resource): - if not(0 <= resource < RLIM_NLIMITS): + if not (0 <= resource < lib.RLIM_NLIMITS): return ValueError("invalid resource specified") - rlim = rlimit() - ret = _getrlimit(resource, byref(rlim)) - if ret == -1: - errno = get_errno() - raise error(errno) - return (rlim.rlim_cur, rlim.rlim_max) + result = ffi.new("long long[2]") + if lib.my_getrlimit(resource, result) == -1: + raise error(ffi.errno) + return (result[0], result[1]) @builtinify -def setrlimit(resource, rlim): - if not(0 <= resource < RLIM_NLIMITS): +def setrlimit(resource, limits): + if not (0 <= resource < lib.RLIM_NLIMITS): return ValueError("invalid resource specified") - rlimit_check_bounds(*rlim) - rlim = rlimit(rlim[0], rlim[1]) - ret = _setrlimit(resource, byref(rlim)) - if ret == -1: - errno = get_errno() - if errno == EINVAL: - return ValueError("current limit exceeds maximum limit") - elif errno == EPERM: - return ValueError("not allowed to raise maximum limit") + limits = tuple(limits) + if len(limits) != 2: + raise ValueError("expected a tuple of 2 integers") + + if lib.my_setrlimit(resource, limits[0], limits[1]) == -1: + if ffi.errno == EINVAL: + raise ValueError("current limit exceeds maximum limit") + elif ffi.errno == EPERM: + raise ValueError("not allowed to raise maximum limit") else: - raise error(errno) + raise error(ffi.errno) + @builtinify def getpagesize(): - if _getpagesize: - return _getpagesize() - else: - try: - return sysconf("SC_PAGE_SIZE") - except ValueError: - # Irix 5.3 has _SC_PAGESIZE, but not _SC_PAGE_SIZE - return sysconf("SC_PAGESIZE") + return os.sysconf("SC_PAGESIZE") -__all__ = ALL_CONSTANTS + ( - 'error', 'timeval', 'struct_rusage', 'rlimit', - 'getrusage', 'getrlimit', 'setrlimit', 'getpagesize', + +def _setup(): + all_constants = [] + p = lib.my_rlimit_consts + while p.name: + name = ffi.string(p.name) + globals()[name] = int(p.value) + 
all_constants.append(name) + p += 1 + return all_constants + +__all__ = tuple(_setup()) + ( + 'error', 'getpagesize', 'struct_rusage', + 'getrusage', 'getrlimit', 'setrlimit', ) - -del ALL_CONSTANTS +del _setup diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -18,6 +18,7 @@ - ``bytebuffer(length)``: return a new read-write buffer of the given length. It works like a simplified array of characters (actually, depending on the configuration the ``array`` module internally uses this). + - ``attach_gdb()``: start a GDB at the interpreter-level (or a PDB before translation). Transparent Proxy Functionality @@ -37,4 +38,3 @@ -------------------------------------------------------- - ``isfake(obj)``: returns True if ``obj`` is faked. - - ``interp_pdb()``: start a pdb at interpreter-level. diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -81,13 +81,13 @@ Simon Burton Martin Matusiak Konstantin Lopuhin + Stefano Rivera Wenzhu Man John Witulski Laurence Tratt Ivan Sichmann Freitas Greg Price Dario Bertini - Stefano Rivera Mark Pearse Simon Cross Andreas Stührk @@ -95,9 +95,10 @@ Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov + Spenser Bauman Jeremy Thurgood Paweł Piotr Przeradowski - Spenser Bauman + Tobias Pape Paul deGrandis Ilya Osadchiy marky1991 @@ -109,7 +110,7 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Tobias Pape + Mark Young Wanja Saatkamp Gerald Klix Mike Blume @@ -140,9 +141,9 @@ Yichao Yu Rocco Moretti Gintautas Miliauskas + Devin Jeanpierre Michael Twomey Lucian Branescu Mihaila - Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -153,6 +154,7 @@ Victor Stinner Andrews Medina anatoly techtonik + Sergey Matyunin Stuart Williams Jasper Schulz Christian Hudon @@ -187,7 +189,6 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan - Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -195,7 +196,9 @@ Pieter Zieschang Gabriel Lukas Vacek + Kunal Grover Andrew Dalke + Florin Papa Sylvain Thenault Jakub Stasiak Nathan Taylor @@ -210,7 +213,6 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner - Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -222,9 +224,11 @@ Artur Lisiecki Sergey Kishchenko Ignas Mikalajunas + Alecsandru Patrascu Christoph Gerum Martin Blais Lene Wagner + Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su Toni Mattis @@ -261,6 +265,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann + florinpapa Rafał Gałczyński Matt Bogosian Christian Muirhead @@ -275,6 +280,7 @@ Boglarka Vezer Chris Pressey Buck Golemon + Diana Popa Konrad Delong Dinu Gherman Chris Lambacher diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.1.0.rst release-5.0.1.rst release-5.0.0.rst release-4.0.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. 
toctree:: whatsnew-head.rst + whatsnew-5.1.0.rst whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.1.0.rst @@ -0,0 +1,136 @@ +======== +PyPy 5.1 +======== + +We have released PyPy 5.1, about a month after PyPy 5.0. +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata, and we now fully support the IBM s390x +architecture. + +You can download the PyPy 5.1 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. 
+ +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s960x** running Linux + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 5.0 released in March 2015) +========================================================= + +* New features: + + * A new jit backend for the IBM s390x, which was a large effort over the past + few months. + + * Add better support for PyUnicodeObject in the C-API compatibility layer + + * Support GNU/kFreeBSD Debian ports in vmprof + + * Add __pypy__._promote + + * Make attrgetter a single type for CPython compatibility + +* Bug Fixes + + * Catch exceptions raised in an exit function + + * Fix a corner case in the JIT + + * Fix edge cases in the cpyext refcounting-compatible semantics + + * Try harder to not emit NEON instructions on ARM processors without NEON + support + + * Improve the rpython posix module system interaction function calls + + * Detect a missing class function implementation instead of calling a random + function + + * Check that PyTupleObjects do not contain any NULLs at the + point of conversion to W_TupleObjects + + * In ctypes, fix _anonymous_ fields of instances + + * Fix JIT issue with unpack() on a Trace which contains half-written operations + + * Fix sandbox startup (a regression in 5.0) + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * Implemented numpy.where for a single argument + + * Indexing by a numpy scalar now returns a scalar + + * Fix transpose(arg) when arg is a sequence + + * Refactor include file handling, now all numpy ndarray, ufunc, and umath + functions 
exported from libpypy.so are declared in pypy_numpy.h, which is + included only when building our fork of numpy + +* Performance improvements: + + * Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting + + * Merge another round of improvements to the warmup performance + + * Cleanup history rewriting in pyjitpl + + * Remove the forced minor collection that occurs when rewriting the + assembler at the start of the JIT backend + +* Internal refactorings: + + * Use a simpler logger to speed up translation + + * Drop vestiges of Python 2.5 support in testing + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -0,0 +1,62 @@ +========================= +What's new in PyPy 5.1 +========================= + +.. this is a revision shortly after release-5.0 +.. startrev: b238b48f9138 + +.. branch: s390x-backend + +The jit compiler backend implementation for the s390x architecutre. +The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. +It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. + +.. branch: remove-py-log + +Replace py.log with something simpler, which should speed up logging + +.. branch: where_1_arg + +Implemented numpy.where for 1 argument (thanks sergem) + +.. branch: fix_indexing_by_numpy_int + +Implement yet another strange numpy indexing compatibility; indexing by a scalar +returns a scalar + +.. branch: fix_transpose_for_list_v3 + +Allow arguments to transpose to be sequences + +.. 
branch: jit-leaner-frontend + +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation +of traces + +.. branch: win32-lib-name + +.. branch: remove-frame-forcing-in-executioncontext + +.. branch: rposix-for-3 + +Wrap more POSIX functions in `rpython.rlib.rposix`. + +.. branch: cleanup-history-rewriting + +A local clean-up in the JIT front-end. + +.. branch: jit-constptr-2 + +Remove the forced minor collection that occurs when rewriting the +assembler at the start of the JIT backend. This is done by emitting +the ConstPtrs in a separate table, and loading from the table. It +gives improved warm-up time and memory usage, and also removes +annoying special-purpose code for pinned pointers. + +.. branch: fix-jitlog + +.. branch: cleanup-includes + +Remove old uneeded numpy headers, what is left is only for testing. Also +generate pypy_numpy.h which exposes functions to directly use micronumpy +ndarray and ufuncs diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,34 +1,16 @@ ========================= -What's new in PyPy 5.0.+ +What's new in PyPy 5.1+ ========================= -.. this is a revision shortly after release-5.0 -.. startrev: b238b48f9138 +.. this is a revision shortly after release-5.1 +.. startrev: 2180e1eaf6f6 -.. branch: s390x-backend +.. branch: rposix-for-3 -The jit compiler backend implementation for the s390x architecutre. -The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. -It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. +Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). +This updates the underlying rpython functions with the ones needed for the +py3k branch + +.. branch: numpy_broadcast -.. 
branch: remove-py-log - -Replace py.log with something simpler, which should speed up logging - -.. branch: where_1_arg - -Implemented numpy.where for 1 argument (thanks sergem) - -.. branch: fix_indexing_by_numpy_int - -Implement yet another strange numpy indexing compatibility; indexing by a scalar -returns a scalar - -.. branch: fix_transpose_for_list_v3 - -Allow arguments to transpose to be sequences - -.. branch: jit-leaner-frontend - -Improve the tracing speed in the frontend as well as heapcache by using a more compact representation -of traces \ No newline at end of file +Add broadcast to micronumpy diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -240,8 +240,9 @@ "when --shared is on (it is by default). " "See issue #1971.") if sys.platform == 'win32': - config.translation.libname = '..\\..\\libs\\python27.lib' - thisdir.join('..', '..', 'libs').ensure(dir=1) + libdir = thisdir.join('..', '..', 'libs') + libdir.ensure(dir=1) + config.translation.libname = str(libdir.join('python27.lib')) if config.translation.thread: config.objspace.usemodules.thread = True @@ -339,10 +340,6 @@ return PyPyJitPolicy(pypy_hooks) def get_entry_point(self, config): - from pypy.tool.lib_pypy import import_from_lib_pypy - rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild') - rebuild.try_rebuild() - space = make_objspace(config) # manually imports app_main.py diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1029,6 +1029,9 @@ def newlist_int(self, list_i): return self.newlist([self.wrap(i) for i in list_i]) + def newlist_float(self, list_f): + return self.newlist([self.wrap(f) for f in list_f]) + def newlist_hint(self, sizehint): from pypy.objspace.std.listobject import make_empty_list_with_size return make_empty_list_with_size(self, sizehint) 
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -291,13 +291,7 @@ return tb def set_traceback(self, traceback): - """Set the current traceback. It should either be a traceback - pointing to some already-escaped frame, or a traceback for the - current frame. To support the latter case we do not mark the - frame as escaped. The idea is that it will be marked as escaping - only if the exception really propagates out of this frame, by - executioncontext.leave() being called with got_exception=True. - """ + """Set the current traceback.""" self._application_traceback = traceback diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -15,9 +15,9 @@ # ____________________________________________________________ -def sorted(lst, cmp=None, key=None, reverse=False): +def sorted(iterable, cmp=None, key=None, reverse=False): "sorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list" - sorted_lst = list(lst) + sorted_lst = list(iterable) sorted_lst.sort(cmp, key, reverse) return sorted_lst diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -90,6 +90,7 @@ 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', 'decode_long' : 'interp_magic.decode_long', + '_promote' : 'interp_magic._promote', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -168,3 +168,23 @@ except InvalidEndiannessError: raise oefmt(space.w_ValueError, "invalid byteorder argument") return 
space.newlong_from_rbigint(result) + +def _promote(space, w_obj): + """ Promote the first argument of the function and return it. Promote is by + value for ints, floats, strs, unicodes (but not subclasses thereof) and by + reference otherwise. (Unicodes not supported right now.) + + This function is experimental!""" + from rpython.rlib import jit + if space.is_w(space.type(w_obj), space.w_int): + jit.promote(space.int_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_float): + jit.promote(space.float_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_str): + jit.promote_string(space.str_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_unicode): + raise OperationError(space.w_TypeError, space.wrap( + "promoting unicode unsupported")) + else: + jit.promote(w_obj) + return w_obj diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -47,3 +47,16 @@ assert decode_long('\x00\x80', 'little', False) == 32768 assert decode_long('\x00\x80', 'little', True) == -32768 raises(ValueError, decode_long, '', 'foo') + + def test_promote(self): + from __pypy__ import _promote + assert _promote(1) == 1 + assert _promote(1.1) == 1.1 + assert _promote("abc") == "abc" + raises(TypeError, _promote, u"abc") + l = [] + assert _promote(l) is l + class A(object): + pass + a = A() + assert _promote(a) is a diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.5.2" +VERSION = "1.6.0" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: @@ -48,6 +48,7 @@ 'from_buffer': 'func.from_buffer', 'string': 'func.string', + 'unpack': 'func.unpack', 'buffer': 'cbuffer.buffer', 'memmove': 'func.memmove', diff 
--git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -323,14 +323,18 @@ from pypy.module._cffi_backend import ctypearray ctype = self.ctype if isinstance(ctype, ctypearray.W_CTypeArray): - return ctype.ctitem.unpack_list_of_int_items(self) + length = self.get_array_length() + with self as ptr: + return ctype.ctitem.unpack_list_of_int_items(ptr, length) return None def unpackiterable_float(self, space): from pypy.module._cffi_backend import ctypearray ctype = self.ctype if isinstance(ctype, ctypearray.W_CTypeArray): - return ctype.ctitem.unpack_list_of_float_items(self) + length = self.get_array_length() + with self as ptr: + return ctype.ctitem.unpack_list_of_float_items(ptr, length) return None @specialize.argtype(1) @@ -367,6 +371,25 @@ with self as ptr: return W_CDataGCP(self.space, ptr, self.ctype, self, w_destructor) + def unpack(self, length): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + space = self.space + if not self.ctype.is_nonfunc_pointer_or_array: + raise oefmt(space.w_TypeError, + "expected a pointer or array, got '%s'", + self.ctype.name) + if length < 0: + raise oefmt(space.w_ValueError, "'length' cannot be negative") + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + with self as ptr: + if not ptr: + raise oefmt(space.w_RuntimeError, + "cannot use unpack() on %s", + space.str_w(self.repr())) + w_result = ctype.ctitem.unpack_ptr(ctype, ptr, length) + return w_result + class W_CDataMem(W_CData): """This is used only by the results of cffi.cast('int', x) diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -7,11 +7,12 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef -from rpython.rtyper.lltypesystem import 
rffi +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import ovfcheck from pypy.module._cffi_backend import cdataobj from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray +from pypy.module._cffi_backend import ctypeprim class W_CTypeArray(W_CTypePtrOrArray): diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -49,10 +49,10 @@ def is_unichar_ptr_or_array(self): return False - def unpack_list_of_int_items(self, cdata): + def unpack_list_of_int_items(self, ptr, length): return None - def unpack_list_of_float_items(self, cdata): + def unpack_list_of_float_items(self, ptr, length): return None def pack_list_of_items(self, cdata, w_ob): @@ -127,6 +127,21 @@ raise oefmt(space.w_TypeError, "string(): unexpected cdata '%s' argument", self.name) + def unpack_ptr(self, w_ctypeptr, ptr, length): + # generic implementation, when the type of items is not known to + # be one for which a fast-case exists + space = self.space + itemsize = self.size + if itemsize < 0: + raise oefmt(space.w_ValueError, + "'%s' points to items of unknown size", + w_ctypeptr.name) + result_w = [None] * length + for i in range(length): + result_w[i] = self.convert_to_object(ptr) + ptr = rffi.ptradd(ptr, itemsize) + return space.newlist(result_w) + def add(self, cdata, i): space = self.space raise oefmt(space.w_TypeError, "cannot add a cdata '%s' and a number", diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -87,6 +87,13 @@ return self.space.wrap(s) return W_CType.string(self, cdataobj, maxlen) + def unpack_ptr(self, w_ctypeptr, ptr, length): + result = self.unpack_list_of_int_items(ptr, length) + if result is not None: + return self.space.newlist_int(result) + return W_CType.unpack_ptr(self, 
w_ctypeptr, ptr, length) + + class W_CTypePrimitiveCharOrUniChar(W_CTypePrimitive): _attrs_ = [] is_primitive_integer = True @@ -125,6 +132,10 @@ value = self._convert_to_char(w_ob) cdata[0] = value + def unpack_ptr(self, w_ctypeptr, ptr, length): + s = rffi.charpsize2str(ptr, length) + return self.space.wrapbytes(s) + # XXX explicitly use an integer type instead of lltype.UniChar here, # because for now the latter is defined as unsigned by RPython (even @@ -171,6 +182,10 @@ value = self._convert_to_unichar(w_ob) rffi.cast(rffi.CWCHARP, cdata)[0] = value + def unpack_ptr(self, w_ctypeptr, ptr, length): + u = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, ptr), length) + return self.space.wrap(u) + class W_CTypePrimitiveSigned(W_CTypePrimitive): _attrs_ = ['value_fits_long', 'value_smaller_than_long'] @@ -221,19 +236,16 @@ def write_raw_integer_data(self, w_cdata, value): w_cdata.write_raw_signed_data(value) - def unpack_list_of_int_items(self, w_cdata): + def unpack_list_of_int_items(self, ptr, length): if self.size == rffi.sizeof(rffi.LONG): from rpython.rlib.rrawarray import populate_list_from_raw_array res = [] - length = w_cdata.get_array_length() - with w_cdata as ptr: - buf = rffi.cast(rffi.LONGP, ptr) - populate_list_from_raw_array(res, buf, length) + buf = rffi.cast(rffi.LONGP, ptr) + populate_list_from_raw_array(res, buf, length) return res elif self.value_smaller_than_long: - res = [0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_list_from_raw_array(res, ptr, self.size) + res = [0] * length + misc.unpack_list_from_raw_array(res, ptr, self.size) return res return None @@ -313,11 +325,10 @@ def write_raw_integer_data(self, w_cdata, value): w_cdata.write_raw_unsigned_data(value) - def unpack_list_of_int_items(self, w_cdata): + def unpack_list_of_int_items(self, ptr, length): if self.value_fits_long: - res = [0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_unsigned_list_from_raw_array(res, ptr, self.size) + res = [0] 
* length + misc.unpack_unsigned_list_from_raw_array(res, ptr, self.size) return res return None @@ -391,19 +402,16 @@ value = space.float_w(space.float(w_ob)) misc.write_raw_float_data(cdata, value, self.size) - def unpack_list_of_float_items(self, w_cdata): + def unpack_list_of_float_items(self, ptr, length): if self.size == rffi.sizeof(rffi.DOUBLE): from rpython.rlib.rrawarray import populate_list_from_raw_array res = [] - length = w_cdata.get_array_length() - with w_cdata as ptr: - buf = rffi.cast(rffi.DOUBLEP, ptr) - populate_list_from_raw_array(res, buf, length) + buf = rffi.cast(rffi.DOUBLEP, ptr) + populate_list_from_raw_array(res, buf, length) return res elif self.size == rffi.sizeof(rffi.FLOAT): - res = [0.0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_cfloat_list_from_raw_array(res, ptr) + res = [0.0] * length + misc.unpack_cfloat_list_from_raw_array(res, ptr) return res return None @@ -421,6 +429,12 @@ return True return W_CTypePrimitive.pack_list_of_items(self, cdata, w_ob) + def unpack_ptr(self, w_ctypeptr, ptr, length): + result = self.unpack_list_of_float_items(ptr, length) + if result is not None: + return self.space.newlist_float(result) + return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length) + class W_CTypePrimitiveLongDouble(W_CTypePrimitiveFloat): _attrs_ = [] diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -542,6 +542,25 @@ return w_cdata.ctype.string(w_cdata, maxlen) + @unwrap_spec(w_cdata=W_CData, length=int) + def descr_unpack(self, w_cdata, length): + """Unpack an array of C data of the given length, +returning a Python string/unicode/list. + +If 'cdata' is a pointer to 'char', returns a byte string. +It does not stop at the first null. This is equivalent to: +ffi.buffer(cdata, length)[:] + +If 'cdata' is a pointer to 'wchar_t', returns a unicode string. 
+'length' is measured in wchar_t's; it is not the size in bytes. + +If 'cdata' is a pointer to anything else, returns a list of +'length' items. This is a faster equivalent to: +[cdata[i] for i in range(length)]""" + # + return w_cdata.unpack(length) + + def descr_sizeof(self, w_arg): """\ Return the size in bytes of the argument. @@ -611,6 +630,38 @@ return w_result + def descr_list_types(self): + """\ +Returns the user type names known to this FFI instance. +This returns a tuple containing three lists of names: +(typedef_names, names_of_structs, names_of_unions)""" + # + space = self.space + ctx = self.ctxobj.ctx + + lst1_w = [] + for i in range(rffi.getintfield(ctx, 'c_num_typenames')): + s = rffi.charp2str(ctx.c_typenames[i].c_name) + lst1_w.append(space.wrap(s)) + + lst2_w = [] + lst3_w = [] + for i in range(rffi.getintfield(ctx, 'c_num_struct_unions')): + su = ctx.c_struct_unions[i] + if su.c_name[0] == '$': + continue + s = rffi.charp2str(su.c_name) + if rffi.getintfield(su, 'c_flags') & cffi_opcode.F_UNION: + lst_w = lst3_w + else: + lst_w = lst2_w + lst_w.append(space.wrap(s)) + + return space.newtuple([space.newlist(lst1_w), + space.newlist(lst2_w), + space.newlist(lst3_w)]) + + def descr_init_once(self, w_func, w_tag): """\ init_once(function, tag): run function() once. 
More precisely, @@ -731,6 +782,7 @@ getctype = interp2app(W_FFIObject.descr_getctype), init_once = interp2app(W_FFIObject.descr_init_once), integer_const = interp2app(W_FFIObject.descr_integer_const), + list_types = interp2app(W_FFIObject.descr_list_types), memmove = interp2app(W_FFIObject.descr_memmove), new = interp2app(W_FFIObject.descr_new), new_allocator = interp2app(W_FFIObject.descr_new_allocator), @@ -739,4 +791,5 @@ sizeof = interp2app(W_FFIObject.descr_sizeof), string = interp2app(W_FFIObject.descr_string), typeof = interp2app(W_FFIObject.descr_typeof), + unpack = interp2app(W_FFIObject.descr_unpack), From pypy.commits at gmail.com Mon Apr 18 12:42:24 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 18 Apr 2016 09:42:24 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Use path_or_fd in readlink() Message-ID: <57150e70.2457c20a.61d0d.106e@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83747:5e7acbe41800 Date: 2016-04-18 17:41 +0100 http://bitbucket.org/pypy/pypy/changeset/5e7acbe41800/ Log: Use path_or_fd in readlink() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1138,8 +1138,10 @@ raise wrap_oserror(space, e) - at unwrap_spec(dir_fd=DirFD(rposix.HAVE_READLINKAT)) -def readlink(space, w_path, dir_fd=DEFAULT_DIR_FD): + at unwrap_spec( + path=path_or_fd(allow_fd=False), + dir_fd=DirFD(rposix.HAVE_READLINKAT)) +def readlink(space, path, dir_fd=DEFAULT_DIR_FD): """readlink(path, *, dir_fd=None) -> path Return a string representing the path to which the symbolic link points. @@ -1148,20 +1150,15 @@ and path should be relative; path will then be relative to that directory. dir_fd may not be implemented on your platform. 
If it is unavailable, using it will raise a NotImplementedError.""" - is_unicode = space.isinstance_w(w_path, space.w_unicode) - if is_unicode: - path = space.fsencode_w(w_path) - else: - path = space.bytes0_w(w_path) try: if dir_fd == DEFAULT_DIR_FD: - result = rposix.readlink(path) + result = call_rposix(rposix.readlink, path) else: - result = rposix.readlinkat(path, dir_fd) - except OSError, e: - raise wrap_oserror2(space, e, w_path) + result = call_rposix(rposix.readlinkat, path, dir_fd) + except OSError as e: + raise wrap_oserror2(space, e, path.w_path) w_result = space.wrapbytes(result) - if is_unicode: + if space.isinstance_w(path.w_path, space.w_unicode): return space.fsdecode(w_result) return w_result From pypy.commits at gmail.com Mon Apr 18 13:15:38 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 18 Apr 2016 10:15:38 -0700 (PDT) Subject: [pypy-commit] pypy py3k: fix for win32 Message-ID: <5715163a.c7811c0a.898ff.65f9@mx.google.com> Author: mattip Branch: py3k Changeset: r83748:86309434696e Date: 2016-04-18 20:11 +0300 http://bitbucket.org/pypy/pypy/changeset/86309434696e/ Log: fix for win32 diff --git a/pypy/module/posix/interp_nt.py b/pypy/module/posix/interp_nt.py --- a/pypy/module/posix/interp_nt.py +++ b/pypy/module/posix/interp_nt.py @@ -1,7 +1,7 @@ from rpython.rlib import rwin32 from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rtyper.module.ll_win32file import make_win32_traits -from rpython.rtyper.module.support import UnicodeTraits +from rpython.rlib.rwin32file import make_win32_traits +from rpython.rlib._os_support import UnicodeTraits from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/rpython/rlib/rstruct/nativefmttable.py b/rpython/rlib/rstruct/nativefmttable.py --- a/rpython/rlib/rstruct/nativefmttable.py +++ b/rpython/rlib/rstruct/nativefmttable.py @@ -83,6 +83,8 @@ #include #ifdef _MSC_VER #define _Bool char + typedef int ssize_t; /* XXX fixme for 
64 bit*/ + typedef unsigned int size_t; /* XXX fixme for 64 bit*/ #endif"""] field_names = dict.fromkeys(INSPECT) for fmtchar, ctype in INSPECT.iteritems(): From pypy.commits at gmail.com Mon Apr 18 14:12:24 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 18 Apr 2016 11:12:24 -0700 (PDT) Subject: [pypy-commit] pypy default: untabbify Message-ID: <57152388.08121c0a.47f07.01d5@mx.google.com> Author: mattip Branch: Changeset: r83749:c6eb8c5bdf4f Date: 2016-04-18 21:08 +0300 http://bitbucket.org/pypy/pypy/changeset/c6eb8c5bdf4f/ Log: untabbify diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py --- a/lib_pypy/_resource_build.py +++ b/lib_pypy/_resource_build.py @@ -86,20 +86,20 @@ } my_rlimit_consts[]; struct rusage { - long ru_maxrss; - long ru_ixrss; - long ru_idrss; - long ru_isrss; - long ru_minflt; - long ru_majflt; - long ru_nswap; - long ru_inblock; - long ru_oublock; - long ru_msgsnd; - long ru_msgrcv; - long ru_nsignals; - long ru_nvcsw; - long ru_nivcsw; + long ru_maxrss; + long ru_ixrss; + long ru_idrss; + long ru_isrss; + long ru_minflt; + long ru_majflt; + long ru_nswap; + long ru_inblock; + long ru_oublock; + long ru_msgsnd; + long ru_msgrcv; + long ru_nsignals; + long ru_nvcsw; + long ru_nivcsw; ...; }; From pypy.commits at gmail.com Mon Apr 18 14:12:26 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 18 Apr 2016 11:12:26 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: merge default into branch Message-ID: <5715238a.6869c20a.44a69.4945@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83750:2e474b62e455 Date: 2016-04-18 21:09 +0300 http://bitbucket.org/pypy/pypy/changeset/2e474b62e455/ Log: merge default into branch diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py --- a/lib_pypy/_resource_build.py +++ b/lib_pypy/_resource_build.py @@ -86,20 +86,20 @@ } my_rlimit_consts[]; struct rusage { - long ru_maxrss; - long ru_ixrss; - long ru_idrss; - long ru_isrss; - long ru_minflt; - long 
ru_majflt; - long ru_nswap; - long ru_inblock; - long ru_oublock; - long ru_msgsnd; - long ru_msgrcv; - long ru_nsignals; - long ru_nvcsw; - long ru_nivcsw; + long ru_maxrss; + long ru_ixrss; + long ru_idrss; + long ru_isrss; + long ru_minflt; + long ru_majflt; + long ru_nswap; + long ru_inblock; + long ru_oublock; + long ru_msgsnd; + long ru_msgrcv; + long ru_nsignals; + long ru_nvcsw; + long ru_nivcsw; ...; }; From pypy.commits at gmail.com Mon Apr 18 15:23:02 2016 From: pypy.commits at gmail.com (pjenvey) Date: Mon, 18 Apr 2016 12:23:02 -0700 (PDT) Subject: [pypy-commit] pypy py3k: refactor and allow specifying flags Message-ID: <57153416.06d8c20a.38efb.4fa9@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83751:9a84b699eb82 Date: 2016-04-18 12:22 -0700 http://bitbucket.org/pypy/pypy/changeset/9a84b699eb82/ Log: refactor and allow specifying flags diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1511,7 +1511,7 @@ assert False # XXX rename/replace with code more like CPython getargs for buffers - def bufferstr_w(self, w_obj): + def bufferstr_w(self, w_obj, flags=BUF_SIMPLE): # Directly returns an interp-level str. 
Note that if w_obj is a # unicode string, this is different from str_w(buffer(w_obj)): # indeed, the latter returns a string with the raw bytes from @@ -1525,13 +1525,7 @@ except OperationError, e: if not e.match(self, self.w_TypeError): raise - try: - buf = w_obj.buffer_w(self, 0) - except BufferInterfaceNotFound: - raise oefmt(self.w_TypeError, - "'%T' does not support the buffer interface", w_obj) - else: - return buf.as_str() + return self.buffer_w(w_obj, flags).as_str() def str_or_None_w(self, w_obj): return None if self.is_none(w_obj) else self.str_w(w_obj) From pypy.commits at gmail.com Mon Apr 18 15:35:11 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 18 Apr 2016 12:35:11 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: fixes from merge Message-ID: <571536ef.04c31c0a.3224b.2350@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83752:54c98869472f Date: 2016-04-18 22:14 +0300 http://bitbucket.org/pypy/pypy/changeset/54c98869472f/ Log: fixes from merge diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -10,7 +10,7 @@ assert api.PySequence_Check(space.newlist([])) assert not api.PySequence_Check(space.newdict()) - def test_sequence(self, space, api): + def test_sequence_api(self, space, api): w_l = space.wrap([1, 2, 3, 4]) assert api.PySequence_Fast(w_l, "message") is w_l diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -64,6 +64,7 @@ class W_MyType(W_MyObject): name = "foobar" + flag_map_or_seq = '?' 
def __init__(self): self.mro_w = [w_some_obj(), w_some_obj()] diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -212,6 +212,13 @@ storage = strategy.erase(list_f) return W_ListObject.from_storage_and_strategy(space, storage, strategy) + @staticmethod + def newlist_cpyext(space, list): + from pypy.module.cpyext.sequence import CPyListStrategy, CPyListStorage + strategy = space.fromcache(CPyListStrategy) + storage = strategy.erase(CPyListStorage(space, list)) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) + def __repr__(self): """ representation for debugging purposes """ return "%s(%s, %s)" % (self.__class__.__name__, self.strategy, From pypy.commits at gmail.com Mon Apr 18 19:45:29 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 18 Apr 2016 16:45:29 -0700 (PDT) Subject: [pypy-commit] pypy stat_ns: implement the st_xtime_ns fields in stat_result() Message-ID: <57157199.e7bec20a.ac70.ffffa8c2@mx.google.com> Author: Ronan Lamy Branch: stat_ns Changeset: r83753:ee90d62e302e Date: 2016-04-19 00:44 +0100 http://bitbucket.org/pypy/pypy/changeset/ee90d62e302e/ Log: implement the st_xtime_ns fields in stat_result() diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -36,10 +36,12 @@ # further fields, not accessible by index (the numbers are still needed # but not visible because they are no longer consecutive) - - st_atime = structseqfield(15, "time of last access") - st_mtime = structseqfield(16, "time of last modification") - st_ctime = structseqfield(17, "time of last status change") + st_atime = structseqfield(11, "time of last access") + st_mtime = structseqfield(12, "time of last modification") + st_ctime = structseqfield(13, "time of last change") + st_atime_ns = structseqfield(14, "time of last access in nanoseconds") + 
st_mtime_ns = structseqfield(15, "time of last modification in nanoseconds") + st_ctime_ns = structseqfield(16, "time of last change in nanoseconds") if "st_blksize" in posix._statfields: st_blksize = structseqfield(20, "blocksize for filesystem I/O") diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -294,44 +294,56 @@ STAT_FIELDS = unrolling_iterable(enumerate(rposix_stat.STAT_FIELDS)) -STATVFS_FIELDS = unrolling_iterable(enumerate(rposix_stat.STATVFS_FIELDS)) +N_INDEXABLE_FIELDS = 10 + +def _time_ns_from_float(ftime): + "Convert a floating-point time (in seconds) into a (s, ns) pair of ints" + fracpart, intpart = modf(ftime) + if fracpart < 0: + fracpart += 1. + intpart -= 1. + return int(intpart), int(fracpart * 1e9) + + at specialize.arg(4) +def _fill_time(space, lst, index, w_keywords, attrname, ftime): + stat_float_times = space.fromcache(StatState).stat_float_times + seconds, fractional_ns = _time_ns_from_float(ftime) + lst[index] = space.wrap(seconds) + if stat_float_times: + space.setitem(w_keywords, space.wrap(attrname), space.wrap(ftime)) + else: + space.setitem(w_keywords, space.wrap(attrname), space.wrap(seconds)) + w_billion = space.wrap(1000000000) + w_total_ns = space.add(space.mul(space.wrap(seconds), w_billion), + space.wrap(fractional_ns)) + space.setitem(w_keywords, space.wrap(attrname + '_ns'), w_total_ns) + +STANDARD_FIELDS = unrolling_iterable(enumerate(rposix_stat.STAT_FIELDS[:7])) +EXTRA_FIELDS = unrolling_iterable(rposix_stat.STAT_FIELDS[10:]) def build_stat_result(space, st): - FIELDS = STAT_FIELDS # also when not translating at all - lst = [None] * rposix_stat.N_INDEXABLE_FIELDS + lst = [None] * N_INDEXABLE_FIELDS w_keywords = space.newdict() - stat_float_times = space.fromcache(StatState).stat_float_times - for i, (name, TYPE) in FIELDS: + for (i, (name, TYPE)) in STANDARD_FIELDS: value = getattr(st, name) - if name in 
('st_atime', 'st_mtime', 'st_ctime'): - value = int(value) # rounded to an integer for indexed access w_value = space.wrap(value) - if i < rposix_stat.N_INDEXABLE_FIELDS: - lst[i] = w_value - else: - space.setitem(w_keywords, space.wrap(name), w_value) + lst[i] = w_value - # non-rounded values for name-based access - if stat_float_times: - space.setitem(w_keywords, - space.wrap('st_atime'), space.wrap(st.st_atime)) - space.setitem(w_keywords, - space.wrap('st_mtime'), space.wrap(st.st_mtime)) - space.setitem(w_keywords, - space.wrap('st_ctime'), space.wrap(st.st_ctime)) - else: - space.setitem(w_keywords, - space.wrap('st_atime'), space.wrap(int(st.st_atime))) - space.setitem(w_keywords, - space.wrap('st_mtime'), space.wrap(int(st.st_mtime))) - space.setitem(w_keywords, - space.wrap('st_ctime'), space.wrap(int(st.st_ctime))) + _fill_time(space, lst, 7, w_keywords, 'st_atime', st.st_atime) + _fill_time(space, lst, 8, w_keywords, 'st_mtime', st.st_mtime) + _fill_time(space, lst, 9, w_keywords, 'st_ctime', st.st_ctime) + + for name, TYPE in EXTRA_FIELDS: + value = getattr(st, name) + w_value = space.wrap(value) + space.setitem(w_keywords, space.wrap(name), w_value) w_tuple = space.newtuple(lst) w_stat_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('stat_result')) return space.call_function(w_stat_result, w_tuple, w_keywords) +STATVFS_FIELDS = unrolling_iterable(enumerate(rposix_stat.STATVFS_FIELDS)) def build_statvfs_result(space, st): vals_w = [None] * len(rposix_stat.STATVFS_FIELDS) From pypy.commits at gmail.com Mon Apr 18 19:49:30 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 18 Apr 2016 16:49:30 -0700 (PDT) Subject: [pypy-commit] pypy default: fix annotation error reporting for stat_result.__getitem__ Message-ID: <5715728a.cfa81c0a.320e3.64a4@mx.google.com> Author: Ronan Lamy Branch: Changeset: r83754:144d46bb01e4 Date: 2016-04-19 00:48 +0100 http://bitbucket.org/pypy/pypy/changeset/144d46bb01e4/ Log: fix annotation error reporting 
for stat_result.__getitem__ diff --git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py --- a/rpython/rlib/rposix_stat.py +++ b/rpython/rlib/rposix_stat.py @@ -94,7 +94,8 @@ return self.__class__, def getattr(self, s_attr): - assert s_attr.is_constant(), "non-constant attr name in getattr()" + if not s_attr.is_constant(): + raise annmodel.AnnotatorError("non-constant attr name in getattr()") attrname = s_attr.const TYPE = STAT_FIELD_TYPES[attrname] return lltype_to_annotation(TYPE) From pypy.commits at gmail.com Mon Apr 18 20:44:35 2016 From: pypy.commits at gmail.com (pjenvey) Date: Mon, 18 Apr 2016 17:44:35 -0700 (PDT) Subject: [pypy-commit] pypy default: fix isinstance(deque(), Hashable) on the pure python deque Message-ID: <57157f73.455ec20a.30b4.ffffc211@mx.google.com> Author: Philip Jenvey Branch: Changeset: r83755:5d26a6aafe9b Date: 2016-04-18 17:42 -0700 http://bitbucket.org/pypy/pypy/changeset/5d26a6aafe9b/ Log: fix isinstance(deque(), Hashable) on the pure python deque diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -320,8 +320,7 @@ def __reduce_ex__(self, proto): return type(self), (list(self), self.maxlen) - def __hash__(self): - raise TypeError("deque objects are unhashable") + __hash__ = None def __copy__(self): return self.__class__(self, self.maxlen) diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -62,6 +62,12 @@ raises(IndexError, d.remove, 'c') assert len(d) == 0 + def test_deque_unhashable(self): + from collections import Hashable + d = self.get_deque() + raises(TypeError, hash, d) + assert not isinstance(d, Hashable) + class AppTestDequeExtra: spaceconfig = dict(usemodules=('binascii', 'struct',)) From pypy.commits at gmail.com Tue Apr 19 02:40:50 2016 From: pypy.commits at gmail.com 
(mattip) Date: Mon, 18 Apr 2016 23:40:50 -0700 (PDT) Subject: [pypy-commit] buildbot default: ensure cp step will not fail Message-ID: <5715d2f2.4ca51c0a.2c8c5.0a35@mx.google.com> Author: mattip Branch: Changeset: r997:11de30c674f6 Date: 2016-04-19 09:40 +0300 http://bitbucket.org/pypy/buildbot/changeset/11de30c674f6/ Log: ensure cp step will not fail diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -702,7 +702,8 @@ workdir='.')) self.addStep(ShellCmd( description="copy ctypes resource cache", - command=['cp', '-rv', 'pypy-c/lib_pypy/ctypes_config_cache', 'build/lib_pypy'], + # the || : ensures this always succeeds, eventually remove this step + command=['cp', '-rv', 'pypy-c/lib_pypy/ctypes_config_cache', 'build/lib_pypy', '||', ':'], haltOnFailure=True, workdir='.')) self.addStep(ShellCmd( From pypy.commits at gmail.com Tue Apr 19 02:44:10 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 18 Apr 2016 23:44:10 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: fix tests for -A Message-ID: <5715d3ba.4412c30a.ba3b8.ffffe67b@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83756:625b0b723c5b Date: 2016-04-19 01:46 +0300 http://bitbucket.org/pypy/pypy/changeset/625b0b723c5b/ Log: fix tests for -A diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -19,7 +19,6 @@ #define PyArray_SimpleNew _PyArray_SimpleNew #define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto #define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define NPY_MAXDIMS 32 diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -228,7 +228,7 @@ return simple_new(space, nd, dims, typenum, order=order, 
owning=owning, w_subtype=w_subtype) - at cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) + at cpython_api([PyObject, PyObject], rffi.INT_real, error=-1, header=HEADER) def PyArray_CopyInto(space, w_dest, w_src): assert isinstance(w_dest, W_NDimArray) assert isinstance(w_src, W_NDimArray) diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -17,10 +17,3 @@ return arr; } -int -_PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src) -{ - memcpy(_PyArray_DATA(dest), _PyArray_DATA(src), _PyArray_NBYTES(dest)); - return 0; -} - diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -303,14 +303,19 @@ ), ], include_dirs=self.numpy_include, prologue=''' + #ifdef PYPY_VERSION + #include + #endif #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include + #ifdef PYPY_VERSION + #define PyArray_FromObject _PyArray_FromObject + #define PyArray_FromAny _PyArray_FromAny + #endif ''', more_init = ''' - #ifndef PYPY_VER + #ifndef PYPY_VERSION import_array(); - #else - #include #endif ''') arr = mod.test_simplenew() @@ -343,14 +348,15 @@ '''), ], include_dirs=self.numpy_include, prologue=''' + #ifdef PYPY_VERSION + #include + #endif #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include ''', more_init = ''' - #ifndef PYPY_VER + #ifndef PYPY_VERSION import_array(); - #else - #include #endif ''') array = ndarray((3, 4), dtype='d') @@ -359,6 +365,7 @@ def test_ufunc(self): if self.runappdirect: + from numpy import arange py.test.xfail('why does this segfault on cpython?') else: from _numpypy.multiarray import arange @@ -399,12 +406,13 @@ """), ], include_dirs=self.numpy_include, prologue=''' + #ifdef PYPY_VERSION + #include + #endif #define NPY_NO_DEPRECATED_API 
NPY_1_7_API_VERSION #include #ifndef PYPY_VERSION #include /*generated by numpy setup.py*/ - #else - #include #endif typedef void (*PyUFuncGenericFunction) (char **args, @@ -471,7 +479,7 @@ }; ''', more_init = ''' - #ifndef PYPY_VER + #ifndef PYPY_VERSION import_array(); #endif ''') From pypy.commits at gmail.com Tue Apr 19 02:44:12 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 18 Apr 2016 23:44:12 -0700 (PDT) Subject: [pypy-commit] pypy default: add Message-ID: <5715d3bc.49f9c20a.492ef.fffff19f@mx.google.com> Author: mattip Branch: Changeset: r83757:73a49ec9edc3 Date: 2016-04-19 09:43 +0300 http://bitbucket.org/pypy/pypy/changeset/73a49ec9edc3/ Log: add diff --git a/lib_pypy/ctypes_config_cache/.empty b/lib_pypy/ctypes_config_cache/.empty new file mode 100644 --- /dev/null +++ b/lib_pypy/ctypes_config_cache/.empty @@ -0,0 +1,1 @@ +dummy file to allow old buildbot configuration to run From pypy.commits at gmail.com Tue Apr 19 03:09:26 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 19 Apr 2016 00:09:26 -0700 (PDT) Subject: [pypy-commit] buildbot default: added a new builder that I have access to (s390x) own-linux Message-ID: <5715d9a6.0976c20a.c0c21.ffffd216@mx.google.com> Author: Richard Plangger Branch: Changeset: r998:66858fbd0d4a Date: 2016-04-19 09:08 +0200 http://bitbucket.org/pypy/buildbot/changeset/66858fbd0d4a/ Log: added a new builder that I have access to (s390x) own-linux diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -175,6 +175,7 @@ LINUX32 = "own-linux-x86-32" LINUX64 = "own-linux-x86-64" LINUX_S390X = "own-linux-s390x" +LINUX_S390X_2 = "own-linux-s390x-2" MACOSX32 = "own-macosx-x86-32" WIN32 = "own-win-x86-32" @@ -526,6 +527,12 @@ "factory": pypyOwnTestFactory, "category": 's390x', }, + {"name": LINUX_S390X_2, + "slavenames": ["s390x-slave"], + "builddir": LINUX_S390X_2, + "factory": pypyOwnTestFactory, + "category": 's390x', + }, 
{'name': JITLINUX_S390X, 'slavenames': ["dje"], 'builddir': JITLINUX_S390X, From pypy.commits at gmail.com Tue Apr 19 03:28:47 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 19 Apr 2016 00:28:47 -0700 (PDT) Subject: [pypy-commit] buildbot default: added own-linux-s390x-2 as a nightly Message-ID: <5715de2f.519d1c0a.6dfa.ffffca8a@mx.google.com> Author: Richard Plangger Branch: Changeset: r999:e3e3bbc1d32e Date: 2016-04-19 09:28 +0200 http://bitbucket.org/pypy/buildbot/changeset/e3e3bbc1d32e/ Log: added own-linux-s390x-2 as a nightly diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -312,6 +312,7 @@ Nightly("nightly-4-00", [LINUX_S390X], branch='default', hour=0, minute=0), Nightly("nightly-4-01", [JITLINUX_S390X], branch='default', hour=2, minute=0), Nightly("nightly-4-02", [JITLINUX_S390X_2], branch='default', hour=2, minute=0), + Nightly("nightly-4-03", [LINUX_S390X_2], branch='default', hour=0, minute=0), # this one has faithfully run every night even though the latest # change to that branch was in January 2013. Re-enable one day. 
From pypy.commits at gmail.com Tue Apr 19 03:45:31 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 19 Apr 2016 00:45:31 -0700 (PDT) Subject: [pypy-commit] pypy default: try to reduce the size of concrete syntax tree nodes Message-ID: <5715e21b.858e1c0a.f880e.ffffd605@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r83758:86afca9e9b63 Date: 2016-04-19 00:03 +0300 http://bitbucket.org/pypy/pypy/changeset/86afca9e9b63/ Log: try to reduce the size of concrete syntax tree nodes woah, astbuilder is a mess diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -54,24 +54,24 @@ n = self.root_node if n.type == syms.file_input: stmts = [] - for i in range(len(n.children) - 1): - stmt = n.children[i] + for i in range(n.num_children() - 1): + stmt = n.get_child(i) if stmt.type == tokens.NEWLINE: continue sub_stmts_count = self.number_of_statements(stmt) if sub_stmts_count == 1: stmts.append(self.handle_stmt(stmt)) else: - stmt = stmt.children[0] + stmt = stmt.get_child(0) for j in range(sub_stmts_count): - small_stmt = stmt.children[j * 2] + small_stmt = stmt.get_child(j * 2) stmts.append(self.handle_stmt(small_stmt)) return ast.Module(stmts) elif n.type == syms.eval_input: - body = self.handle_testlist(n.children[0]) + body = self.handle_testlist(n.get_child(0)) return ast.Expression(body) elif n.type == syms.single_input: - first_child = n.children[0] + first_child = n.get_child(0) if first_child.type == tokens.NEWLINE: # An empty line. 
return ast.Interactive([]) @@ -81,8 +81,8 @@ stmts = [self.handle_stmt(first_child)] else: stmts = [] - for i in range(0, len(first_child.children), 2): - stmt = first_child.children[i] + for i in range(0, first_child.num_children(), 2): + stmt = first_child.get_child(i) if stmt.type == tokens.NEWLINE: break stmts.append(self.handle_stmt(stmt)) @@ -96,16 +96,16 @@ if stmt_type == syms.compound_stmt: return 1 elif stmt_type == syms.stmt: - return self.number_of_statements(n.children[0]) + return self.number_of_statements(n.get_child(0)) elif stmt_type == syms.simple_stmt: # Divide to remove semi-colons. - return len(n.children) // 2 + return n.num_children() // 2 else: raise AssertionError("non-statement node") def error(self, msg, n): """Raise a SyntaxError with the lineno and column set to n's.""" - raise SyntaxError(msg, n.lineno, n.column, + raise SyntaxError(msg, n.get_lineno(), n.get_column(), filename=self.compile_info.filename) def error_ast(self, msg, ast_node): @@ -132,51 +132,51 @@ expressions = None newline = True start = 1 - child_count = len(print_node.children) - if child_count > 2 and print_node.children[1].type == tokens.RIGHTSHIFT: - dest = self.handle_expr(print_node.children[2]) + child_count = print_node.num_children() + if child_count > 2 and print_node.get_child(1).type == tokens.RIGHTSHIFT: + dest = self.handle_expr(print_node.get_child(2)) start = 4 if (child_count + 1 - start) // 2: - expressions = [self.handle_expr(print_node.children[i]) + expressions = [self.handle_expr(print_node.get_child(i)) for i in range(start, child_count, 2)] - if print_node.children[-1].type == tokens.COMMA: + if print_node.get_child(-1).type == tokens.COMMA: newline = False - return ast.Print(dest, expressions, newline, print_node.lineno, - print_node.column) + return ast.Print(dest, expressions, newline, print_node.get_lineno(), + print_node.get_column()) def handle_del_stmt(self, del_node): - targets = self.handle_exprlist(del_node.children[1], ast.Del) - 
return ast.Delete(targets, del_node.lineno, del_node.column) + targets = self.handle_exprlist(del_node.get_child(1), ast.Del) + return ast.Delete(targets, del_node.get_lineno(), del_node.get_column()) def handle_flow_stmt(self, flow_node): - first_child = flow_node.children[0] + first_child = flow_node.get_child(0) first_child_type = first_child.type if first_child_type == syms.break_stmt: - return ast.Break(flow_node.lineno, flow_node.column) + return ast.Break(flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.continue_stmt: - return ast.Continue(flow_node.lineno, flow_node.column) + return ast.Continue(flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.yield_stmt: - yield_expr = self.handle_expr(first_child.children[0]) - return ast.Expr(yield_expr, flow_node.lineno, flow_node.column) + yield_expr = self.handle_expr(first_child.get_child(0)) + return ast.Expr(yield_expr, flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.return_stmt: - if len(first_child.children) == 1: + if first_child.num_children() == 1: values = None else: - values = self.handle_testlist(first_child.children[1]) - return ast.Return(values, flow_node.lineno, flow_node.column) + values = self.handle_testlist(first_child.get_child(1)) + return ast.Return(values, flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.raise_stmt: exc = None value = None traceback = None - child_count = len(first_child.children) + child_count = first_child.num_children() if child_count >= 2: - exc = self.handle_expr(first_child.children[1]) + exc = self.handle_expr(first_child.get_child(1)) if child_count >= 4: - value = self.handle_expr(first_child.children[3]) + value = self.handle_expr(first_child.get_child(3)) if child_count == 6: - traceback = self.handle_expr(first_child.children[5]) - return ast.Raise(exc, value, traceback, flow_node.lineno, - flow_node.column) + traceback = 
self.handle_expr(first_child.get_child(5)) + return ast.Raise(exc, value, traceback, flow_node.get_lineno(), + flow_node.get_column()) else: raise AssertionError("unknown flow statement") @@ -184,32 +184,32 @@ while True: import_name_type = import_name.type if import_name_type == syms.import_as_name: - name = import_name.children[0].value - if len(import_name.children) == 3: - as_name = import_name.children[2].value - self.check_forbidden_name(as_name, import_name.children[2]) + name = import_name.get_child(0).get_value() + if import_name.num_children() == 3: + as_name = import_name.get_child(2).get_value() + self.check_forbidden_name(as_name, import_name.get_child(2)) else: as_name = None - self.check_forbidden_name(name, import_name.children[0]) + self.check_forbidden_name(name, import_name.get_child(0)) return ast.alias(name, as_name) elif import_name_type == syms.dotted_as_name: - if len(import_name.children) == 1: - import_name = import_name.children[0] + if import_name.num_children() == 1: + import_name = import_name.get_child(0) continue - alias = self.alias_for_import_name(import_name.children[0], + alias = self.alias_for_import_name(import_name.get_child(0), store=False) - asname_node = import_name.children[2] - alias.asname = asname_node.value + asname_node = import_name.get_child(2) + alias.asname = asname_node.get_value() self.check_forbidden_name(alias.asname, asname_node) return alias elif import_name_type == syms.dotted_name: - if len(import_name.children) == 1: - name = import_name.children[0].value + if import_name.num_children() == 1: + name = import_name.get_child(0).get_value() if store: - self.check_forbidden_name(name, import_name.children[0]) + self.check_forbidden_name(name, import_name.get_child(0)) return ast.alias(name, None) - name_parts = [import_name.children[i].value - for i in range(0, len(import_name.children), 2)] + name_parts = [import_name.get_child(i).get_value() + for i in range(0, import_name.num_children(), 2)] name = 
".".join(name_parts) return ast.alias(name, None) elif import_name_type == tokens.STAR: @@ -218,20 +218,20 @@ raise AssertionError("unknown import name") def handle_import_stmt(self, import_node): - import_node = import_node.children[0] + import_node = import_node.get_child(0) if import_node.type == syms.import_name: - dotted_as_names = import_node.children[1] - aliases = [self.alias_for_import_name(dotted_as_names.children[i]) - for i in range(0, len(dotted_as_names.children), 2)] - return ast.Import(aliases, import_node.lineno, import_node.column) + dotted_as_names = import_node.get_child(1) + aliases = [self.alias_for_import_name(dotted_as_names.get_child(i)) + for i in range(0, dotted_as_names.num_children(), 2)] + return ast.Import(aliases, import_node.get_lineno(), import_node.get_column()) elif import_node.type == syms.import_from: - child_count = len(import_node.children) + child_count = import_node.num_children() module = None modname = None i = 1 dot_count = 0 while i < child_count: - child = import_node.children[i] + child = import_node.get_child(i) if child.type == syms.dotted_name: module = self.alias_for_import_name(child, False) i += 1 @@ -241,16 +241,16 @@ i += 1 dot_count += 1 i += 1 - after_import_type = import_node.children[i].type + after_import_type = import_node.get_child(i).type star_import = False if after_import_type == tokens.STAR: - names_node = import_node.children[i] + names_node = import_node.get_child(i) star_import = True elif after_import_type == tokens.LPAR: - names_node = import_node.children[i + 1] + names_node = import_node.get_child(i + 1) elif after_import_type == syms.import_as_names: - names_node = import_node.children[i] - if len(names_node.children) % 2 == 0: + names_node = import_node.get_child(i) + if names_node.num_children() % 2 == 0: self.error("trailing comma is only allowed with " "surronding parenthesis", names_node) else: @@ -258,25 +258,25 @@ if star_import: aliases = [self.alias_for_import_name(names_node)] 
else: - aliases = [self.alias_for_import_name(names_node.children[i]) - for i in range(0, len(names_node.children), 2)] + aliases = [self.alias_for_import_name(names_node.get_child(i)) + for i in range(0, names_node.num_children(), 2)] if module is not None: modname = module.name return ast.ImportFrom(modname, aliases, dot_count, - import_node.lineno, import_node.column) + import_node.get_lineno(), import_node.get_column()) else: raise AssertionError("unknown import node") def handle_global_stmt(self, global_node): - names = [global_node.children[i].value - for i in range(1, len(global_node.children), 2)] - return ast.Global(names, global_node.lineno, global_node.column) + names = [global_node.get_child(i).get_value() + for i in range(1, global_node.num_children(), 2)] + return ast.Global(names, global_node.get_lineno(), global_node.get_column()) def handle_exec_stmt(self, exec_node): - child_count = len(exec_node.children) + child_count = exec_node.num_children() globs = None locs = None - to_execute = self.handle_expr(exec_node.children[1]) + to_execute = self.handle_expr(exec_node.get_child(1)) if child_count < 4: if isinstance(to_execute, ast.Tuple) and \ (len(to_execute.elts) == 2 or len(to_execute.elts) == 3): @@ -285,272 +285,273 @@ locs = to_execute.elts[2] to_execute = to_execute.elts[0] elif child_count >= 4: - globs = self.handle_expr(exec_node.children[3]) + globs = self.handle_expr(exec_node.get_child(3)) if child_count == 6: - locs = self.handle_expr(exec_node.children[5]) - return ast.Exec(to_execute, globs, locs, exec_node.lineno, - exec_node.column) + locs = self.handle_expr(exec_node.get_child(5)) + return ast.Exec(to_execute, globs, locs, exec_node.get_lineno(), + exec_node.get_column()) def handle_assert_stmt(self, assert_node): - expr = self.handle_expr(assert_node.children[1]) + expr = self.handle_expr(assert_node.get_child(1)) msg = None - if len(assert_node.children) == 4: - msg = self.handle_expr(assert_node.children[3]) - return 
ast.Assert(expr, msg, assert_node.lineno, assert_node.column) + if assert_node.num_children() == 4: + msg = self.handle_expr(assert_node.get_child(3)) + return ast.Assert(expr, msg, assert_node.get_lineno(), assert_node.get_column()) def handle_suite(self, suite_node): - first_child = suite_node.children[0] + first_child = suite_node.get_child(0) if first_child.type == syms.simple_stmt: - end = len(first_child.children) - 1 - if first_child.children[end - 1].type == tokens.SEMI: + end = first_child.num_children() - 1 + if first_child.get_child(end - 1).type == tokens.SEMI: end -= 1 - stmts = [self.handle_stmt(first_child.children[i]) + stmts = [self.handle_stmt(first_child.get_child(i)) for i in range(0, end, 2)] else: stmts = [] - for i in range(2, len(suite_node.children) - 1): - stmt = suite_node.children[i] + for i in range(2, suite_node.num_children() - 1): + stmt = suite_node.get_child(i) stmt_count = self.number_of_statements(stmt) if stmt_count == 1: stmts.append(self.handle_stmt(stmt)) else: - simple_stmt = stmt.children[0] - for j in range(0, len(simple_stmt.children), 2): - stmt = simple_stmt.children[j] - if not stmt.children: + simple_stmt = stmt.get_child(0) + for j in range(0, simple_stmt.num_children(), 2): + stmt = simple_stmt.get_child(j) + if not stmt.num_children(): break stmts.append(self.handle_stmt(stmt)) return stmts def handle_if_stmt(self, if_node): - child_count = len(if_node.children) + child_count = if_node.num_children() if child_count == 4: - test = self.handle_expr(if_node.children[1]) - suite = self.handle_suite(if_node.children[3]) - return ast.If(test, suite, None, if_node.lineno, if_node.column) - otherwise_string = if_node.children[4].value + test = self.handle_expr(if_node.get_child(1)) + suite = self.handle_suite(if_node.get_child(3)) + return ast.If(test, suite, None, if_node.get_lineno(), if_node.get_column()) + otherwise_string = if_node.get_child(4).get_value() if otherwise_string == "else": - test = 
self.handle_expr(if_node.children[1]) - suite = self.handle_suite(if_node.children[3]) - else_suite = self.handle_suite(if_node.children[6]) - return ast.If(test, suite, else_suite, if_node.lineno, - if_node.column) + test = self.handle_expr(if_node.get_child(1)) + suite = self.handle_suite(if_node.get_child(3)) + else_suite = self.handle_suite(if_node.get_child(6)) + return ast.If(test, suite, else_suite, if_node.get_lineno(), + if_node.get_column()) elif otherwise_string == "elif": elif_count = child_count - 4 - after_elif = if_node.children[elif_count + 1] + after_elif = if_node.get_child(elif_count + 1) if after_elif.type == tokens.NAME and \ - after_elif.value == "else": + after_elif.get_value() == "else": has_else = True elif_count -= 3 else: has_else = False elif_count /= 4 if has_else: - last_elif = if_node.children[-6] + last_elif = if_node.get_child(-6) last_elif_test = self.handle_expr(last_elif) - elif_body = self.handle_suite(if_node.children[-4]) - else_body = self.handle_suite(if_node.children[-1]) + elif_body = self.handle_suite(if_node.get_child(-4)) + else_body = self.handle_suite(if_node.get_child(-1)) otherwise = [ast.If(last_elif_test, elif_body, else_body, - last_elif.lineno, last_elif.column)] + last_elif.get_lineno(), last_elif.get_column())] elif_count -= 1 else: otherwise = None for i in range(elif_count): offset = 5 + (elif_count - i - 1) * 4 - elif_test_node = if_node.children[offset] + elif_test_node = if_node.get_child(offset) elif_test = self.handle_expr(elif_test_node) - elif_body = self.handle_suite(if_node.children[offset + 2]) + elif_body = self.handle_suite(if_node.get_child(offset + 2)) new_if = ast.If(elif_test, elif_body, otherwise, - elif_test_node.lineno, elif_test_node.column) + elif_test_node.get_lineno(), elif_test_node.get_column()) otherwise = [new_if] - expr = self.handle_expr(if_node.children[1]) - body = self.handle_suite(if_node.children[3]) - return ast.If(expr, body, otherwise, if_node.lineno, if_node.column) + 
expr = self.handle_expr(if_node.get_child(1)) + body = self.handle_suite(if_node.get_child(3)) + return ast.If(expr, body, otherwise, if_node.get_lineno(), if_node.get_column()) else: raise AssertionError("unknown if statement configuration") def handle_while_stmt(self, while_node): - loop_test = self.handle_expr(while_node.children[1]) - body = self.handle_suite(while_node.children[3]) - if len(while_node.children) == 7: - otherwise = self.handle_suite(while_node.children[6]) + loop_test = self.handle_expr(while_node.get_child(1)) + body = self.handle_suite(while_node.get_child(3)) + if while_node.num_children() == 7: + otherwise = self.handle_suite(while_node.get_child(6)) else: otherwise = None - return ast.While(loop_test, body, otherwise, while_node.lineno, - while_node.column) + return ast.While(loop_test, body, otherwise, while_node.get_lineno(), + while_node.get_column()) def handle_for_stmt(self, for_node): - target_node = for_node.children[1] + target_node = for_node.get_child(1) target_as_exprlist = self.handle_exprlist(target_node, ast.Store) - if len(target_node.children) == 1: + if target_node.num_children() == 1: target = target_as_exprlist[0] else: target = ast.Tuple(target_as_exprlist, ast.Store, - target_node.lineno, target_node.column) - expr = self.handle_testlist(for_node.children[3]) - body = self.handle_suite(for_node.children[5]) - if len(for_node.children) == 9: - otherwise = self.handle_suite(for_node.children[8]) + target_node.get_lineno(), target_node.get_column()) + expr = self.handle_testlist(for_node.get_child(3)) + body = self.handle_suite(for_node.get_child(5)) + if for_node.num_children() == 9: + otherwise = self.handle_suite(for_node.get_child(8)) else: otherwise = None - return ast.For(target, expr, body, otherwise, for_node.lineno, - for_node.column) + return ast.For(target, expr, body, otherwise, for_node.get_lineno(), + for_node.get_column()) def handle_except_clause(self, exc, body): test = None target = None suite = 
self.handle_suite(body) - child_count = len(exc.children) + child_count = exc.num_children() if child_count >= 2: - test = self.handle_expr(exc.children[1]) + test = self.handle_expr(exc.get_child(1)) if child_count == 4: - target_child = exc.children[3] + target_child = exc.get_child(3) target = self.handle_expr(target_child) self.set_context(target, ast.Store) - return ast.ExceptHandler(test, target, suite, exc.lineno, exc.column) + return ast.ExceptHandler(test, target, suite, exc.get_lineno(), exc.get_column()) def handle_try_stmt(self, try_node): - body = self.handle_suite(try_node.children[2]) - child_count = len(try_node.children) + body = self.handle_suite(try_node.get_child(2)) + child_count = try_node.num_children() except_count = (child_count - 3 ) // 3 otherwise = None finally_suite = None - possible_extra_clause = try_node.children[-3] + possible_extra_clause = try_node.get_child(-3) if possible_extra_clause.type == tokens.NAME: - if possible_extra_clause.value == "finally": + if possible_extra_clause.get_value() == "finally": if child_count >= 9 and \ - try_node.children[-6].type == tokens.NAME: - otherwise = self.handle_suite(try_node.children[-4]) + try_node.get_child(-6).type == tokens.NAME: + otherwise = self.handle_suite(try_node.get_child(-4)) except_count -= 1 - finally_suite = self.handle_suite(try_node.children[-1]) + finally_suite = self.handle_suite(try_node.get_child(-1)) except_count -= 1 else: - otherwise = self.handle_suite(try_node.children[-1]) + otherwise = self.handle_suite(try_node.get_child(-1)) except_count -= 1 if except_count: handlers = [] for i in range(except_count): base_offset = i * 3 - exc = try_node.children[3 + base_offset] - except_body = try_node.children[5 + base_offset] + exc = try_node.get_child(3 + base_offset) + except_body = try_node.get_child(5 + base_offset) handlers.append(self.handle_except_clause(exc, except_body)) except_ast = ast.TryExcept(body, handlers, otherwise, - try_node.lineno, try_node.column) + 
try_node.get_lineno(), try_node.get_column()) if finally_suite is None: return except_ast body = [except_ast] - return ast.TryFinally(body, finally_suite, try_node.lineno, - try_node.column) + return ast.TryFinally(body, finally_suite, try_node.get_lineno(), + try_node.get_column()) def handle_with_stmt(self, with_node): - body = self.handle_suite(with_node.children[-1]) - i = len(with_node.children) - 1 + body = self.handle_suite(with_node.get_child(-1)) + i = with_node.num_children() - 1 while True: i -= 2 - item = with_node.children[i] - test = self.handle_expr(item.children[0]) - if len(item.children) == 3: - target = self.handle_expr(item.children[2]) + item = with_node.get_child(i) + test = self.handle_expr(item.get_child(0)) + if item.num_children() == 3: + target = self.handle_expr(item.get_child(2)) self.set_context(target, ast.Store) else: target = None - wi = ast.With(test, target, body, with_node.lineno, - with_node.column) + wi = ast.With(test, target, body, with_node.get_lineno(), + with_node.get_column()) if i == 1: break body = [wi] return wi def handle_classdef(self, classdef_node, decorators=None): - name_node = classdef_node.children[1] - name = name_node.value + name_node = classdef_node.get_child(1) + name = name_node.get_value() self.check_forbidden_name(name, name_node) - if len(classdef_node.children) == 4: - body = self.handle_suite(classdef_node.children[3]) + if classdef_node.num_children() == 4: + body = self.handle_suite(classdef_node.get_child(3)) return ast.ClassDef(name, None, body, decorators, - classdef_node.lineno, classdef_node.column) - if classdef_node.children[3].type == tokens.RPAR: - body = self.handle_suite(classdef_node.children[5]) + classdef_node.get_lineno(), classdef_node.get_column()) + if classdef_node.get_child(3).type == tokens.RPAR: + body = self.handle_suite(classdef_node.get_child(5)) return ast.ClassDef(name, None, body, decorators, - classdef_node.lineno, classdef_node.column) - bases = 
self.handle_class_bases(classdef_node.children[3]) - body = self.handle_suite(classdef_node.children[6]) - return ast.ClassDef(name, bases, body, decorators, classdef_node.lineno, - classdef_node.column) + classdef_node.get_lineno(), classdef_node.get_column()) + bases = self.handle_class_bases(classdef_node.get_child(3)) + body = self.handle_suite(classdef_node.get_child(6)) + return ast.ClassDef(name, bases, body, decorators, classdef_node.get_lineno(), + classdef_node.get_column()) def handle_class_bases(self, bases_node): - if len(bases_node.children) == 1: - return [self.handle_expr(bases_node.children[0])] + if bases_node.num_children() == 1: + return [self.handle_expr(bases_node.get_child(0))] return self.get_expression_list(bases_node) def handle_funcdef(self, funcdef_node, decorators=None): - name_node = funcdef_node.children[1] - name = name_node.value + name_node = funcdef_node.get_child(1) + name = name_node.get_value() self.check_forbidden_name(name, name_node) - args = self.handle_arguments(funcdef_node.children[2]) - body = self.handle_suite(funcdef_node.children[4]) + args = self.handle_arguments(funcdef_node.get_child(2)) + body = self.handle_suite(funcdef_node.get_child(4)) return ast.FunctionDef(name, args, body, decorators, - funcdef_node.lineno, funcdef_node.column) + funcdef_node.get_lineno(), funcdef_node.get_column()) def handle_decorated(self, decorated_node): - decorators = self.handle_decorators(decorated_node.children[0]) - definition = decorated_node.children[1] + decorators = self.handle_decorators(decorated_node.get_child(0)) + definition = decorated_node.get_child(1) if definition.type == syms.funcdef: node = self.handle_funcdef(definition, decorators) elif definition.type == syms.classdef: node = self.handle_classdef(definition, decorators) else: raise AssertionError("unkown decorated") - node.lineno = decorated_node.lineno - node.col_offset = decorated_node.column + node.lineno = decorated_node.get_lineno() + node.col_offset = 
decorated_node.get_column() return node def handle_decorators(self, decorators_node): - return [self.handle_decorator(dec) for dec in decorators_node.children] + return [self.handle_decorator(decorators_node.get_child(i)) + for i in range(decorators_node.num_children())] def handle_decorator(self, decorator_node): - dec_name = self.handle_dotted_name(decorator_node.children[1]) - if len(decorator_node.children) == 3: + dec_name = self.handle_dotted_name(decorator_node.get_child(1)) + if decorator_node.num_children() == 3: dec = dec_name - elif len(decorator_node.children) == 5: + elif decorator_node.num_children() == 5: dec = ast.Call(dec_name, None, None, None, None, - decorator_node.lineno, decorator_node.column) + decorator_node.get_lineno(), decorator_node.get_column()) else: - dec = self.handle_call(decorator_node.children[3], dec_name) + dec = self.handle_call(decorator_node.get_child(3), dec_name) return dec def handle_dotted_name(self, dotted_name_node): - base_value = dotted_name_node.children[0].value - name = ast.Name(base_value, ast.Load, dotted_name_node.lineno, - dotted_name_node.column) - for i in range(2, len(dotted_name_node.children), 2): - attr = dotted_name_node.children[i].value - name = ast.Attribute(name, attr, ast.Load, dotted_name_node.lineno, - dotted_name_node.column) + base_value = dotted_name_node.get_child(0).get_value() + name = ast.Name(base_value, ast.Load, dotted_name_node.get_lineno(), + dotted_name_node.get_column()) + for i in range(2, dotted_name_node.num_children(), 2): + attr = dotted_name_node.get_child(i).get_value() + name = ast.Attribute(name, attr, ast.Load, dotted_name_node.get_lineno(), + dotted_name_node.get_column()) return name def handle_arguments(self, arguments_node): if arguments_node.type == syms.parameters: - if len(arguments_node.children) == 2: + if arguments_node.num_children() == 2: return ast.arguments(None, None, None, None) - arguments_node = arguments_node.children[1] + arguments_node = 
arguments_node.get_child(1) i = 0 - child_count = len(arguments_node.children) + child_count = arguments_node.num_children() defaults = [] args = [] variable_arg = None keywords_arg = None have_default = False while i < child_count: - argument = arguments_node.children[i] + argument = arguments_node.get_child(i) arg_type = argument.type if arg_type == syms.fpdef: parenthesized = False complex_args = False while True: if i + 1 < child_count and \ - arguments_node.children[i + 1].type == tokens.EQUAL: - default_node = arguments_node.children[i + 2] + arguments_node.get_child(i + 1).type == tokens.EQUAL: + default_node = arguments_node.get_child(i + 2) defaults.append(self.handle_expr(default_node)) i += 2 have_default = True @@ -561,32 +562,32 @@ msg = ("non-default argument follows default " "argument") self.error(msg, arguments_node) - if len(argument.children) == 3: - sub_arg = argument.children[1] - if len(sub_arg.children) != 1: + if argument.num_children() == 3: + sub_arg = argument.get_child(1) + if sub_arg.num_children() != 1: complex_args = True args.append(self.handle_arg_unpacking(sub_arg)) else: parenthesized = True - argument = sub_arg.children[0] + argument = sub_arg.get_child(0) continue - if argument.children[0].type == tokens.NAME: - name_node = argument.children[0] - arg_name = name_node.value + if argument.get_child(0).type == tokens.NAME: + name_node = argument.get_child(0) + arg_name = name_node.get_value() self.check_forbidden_name(arg_name, name_node) - name = ast.Name(arg_name, ast.Param, name_node.lineno, - name_node.column) + name = ast.Name(arg_name, ast.Param, name_node.get_lineno(), + name_node.get_column()) args.append(name) i += 2 break elif arg_type == tokens.STAR: - name_node = arguments_node.children[i + 1] - variable_arg = name_node.value + name_node = arguments_node.get_child(i + 1) + variable_arg = name_node.get_value() self.check_forbidden_name(variable_arg, name_node) i += 3 elif arg_type == tokens.DOUBLESTAR: - name_node = 
arguments_node.children[i + 1] - keywords_arg = name_node.value + name_node = arguments_node.get_child(i + 1) + keywords_arg = name_node.get_value() self.check_forbidden_name(keywords_arg, name_node) i += 3 else: @@ -599,35 +600,35 @@ def handle_arg_unpacking(self, fplist_node): args = [] - for i in range((len(fplist_node.children) + 1) / 2): - fpdef_node = fplist_node.children[i * 2] + for i in range((fplist_node.num_children() + 1) / 2): + fpdef_node = fplist_node.get_child(i * 2) while True: - child = fpdef_node.children[0] + child = fpdef_node.get_child(0) if child.type == tokens.NAME: - arg = ast.Name(child.value, ast.Store, child.lineno, - child.column) + arg = ast.Name(child.get_value(), ast.Store, child.get_lineno(), + child.get_column()) args.append(arg) else: - child = fpdef_node.children[1] - if len(child.children) == 1: - fpdef_node = child.children[0] + child = fpdef_node.get_child(1) + if child.num_children() == 1: + fpdef_node = child.get_child(0) continue args.append(self.handle_arg_unpacking(child)) break - tup = ast.Tuple(args, ast.Store, fplist_node.lineno, fplist_node.column) + tup = ast.Tuple(args, ast.Store, fplist_node.get_lineno(), fplist_node.get_column()) self.set_context(tup, ast.Store) return tup def handle_stmt(self, stmt): stmt_type = stmt.type if stmt_type == syms.stmt: - stmt = stmt.children[0] + stmt = stmt.get_child(0) stmt_type = stmt.type if stmt_type == syms.simple_stmt: - stmt = stmt.children[0] + stmt = stmt.get_child(0) stmt_type = stmt.type if stmt_type == syms.small_stmt: - stmt = stmt.children[0] + stmt = stmt.get_child(0) stmt_type = stmt.type if stmt_type == syms.expr_stmt: return self.handle_expr_stmt(stmt) @@ -636,7 +637,7 @@ elif stmt_type == syms.del_stmt: return self.handle_del_stmt(stmt) elif stmt_type == syms.pass_stmt: - return ast.Pass(stmt.lineno, stmt.column) + return ast.Pass(stmt.get_lineno(), stmt.get_column()) elif stmt_type == syms.flow_stmt: return self.handle_flow_stmt(stmt) elif stmt_type == 
syms.import_stmt: @@ -650,7 +651,7 @@ else: raise AssertionError("unhandled small statement") elif stmt_type == syms.compound_stmt: - stmt = stmt.children[0] + stmt = stmt.get_child(0) stmt_type = stmt.type if stmt_type == syms.if_stmt: return self.handle_if_stmt(stmt) @@ -674,113 +675,113 @@ raise AssertionError("unknown statment type") def handle_expr_stmt(self, stmt): - if len(stmt.children) == 1: - expression = self.handle_testlist(stmt.children[0]) - return ast.Expr(expression, stmt.lineno, stmt.column) - elif stmt.children[1].type == syms.augassign: + if stmt.num_children() == 1: + expression = self.handle_testlist(stmt.get_child(0)) + return ast.Expr(expression, stmt.get_lineno(), stmt.get_column()) + elif stmt.get_child(1).type == syms.augassign: # Augmented assignment. - target_child = stmt.children[0] + target_child = stmt.get_child(0) target_expr = self.handle_testlist(target_child) self.set_context(target_expr, ast.Store) - value_child = stmt.children[2] + value_child = stmt.get_child(2) if value_child.type == syms.testlist: value_expr = self.handle_testlist(value_child) else: value_expr = self.handle_expr(value_child) - op_str = stmt.children[1].children[0].value + op_str = stmt.get_child(1).get_child(0).get_value() operator = augassign_operator_map[op_str] return ast.AugAssign(target_expr, operator, value_expr, - stmt.lineno, stmt.column) + stmt.get_lineno(), stmt.get_column()) else: # Normal assignment. 
targets = [] - for i in range(0, len(stmt.children) - 2, 2): - target_node = stmt.children[i] + for i in range(0, stmt.num_children() - 2, 2): + target_node = stmt.get_child(i) if target_node.type == syms.yield_expr: self.error("can't assign to yield expression", target_node) target_expr = self.handle_testlist(target_node) self.set_context(target_expr, ast.Store) targets.append(target_expr) - value_child = stmt.children[-1] + value_child = stmt.get_child(-1) if value_child.type == syms.testlist: value_expr = self.handle_testlist(value_child) else: value_expr = self.handle_expr(value_child) - return ast.Assign(targets, value_expr, stmt.lineno, stmt.column) + return ast.Assign(targets, value_expr, stmt.get_lineno(), stmt.get_column()) def get_expression_list(self, tests): - return [self.handle_expr(tests.children[i]) - for i in range(0, len(tests.children), 2)] + return [self.handle_expr(tests.get_child(i)) + for i in range(0, tests.num_children(), 2)] def handle_testlist(self, tests): - if len(tests.children) == 1: - return self.handle_expr(tests.children[0]) + if tests.num_children() == 1: + return self.handle_expr(tests.get_child(0)) else: elts = self.get_expression_list(tests) - return ast.Tuple(elts, ast.Load, tests.lineno, tests.column) + return ast.Tuple(elts, ast.Load, tests.get_lineno(), tests.get_column()) def handle_expr(self, expr_node): # Loop until we return something. 
while True: expr_node_type = expr_node.type if expr_node_type == syms.test or expr_node_type == syms.old_test: - first_child = expr_node.children[0] + first_child = expr_node.get_child(0) if first_child.type in (syms.lambdef, syms.old_lambdef): return self.handle_lambdef(first_child) - elif len(expr_node.children) > 1: + elif expr_node.num_children() > 1: return self.handle_ifexp(expr_node) else: expr_node = first_child elif expr_node_type == syms.or_test or \ expr_node_type == syms.and_test: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue - seq = [self.handle_expr(expr_node.children[i]) - for i in range(0, len(expr_node.children), 2)] + seq = [self.handle_expr(expr_node.get_child(i)) + for i in range(0, expr_node.num_children(), 2)] if expr_node_type == syms.or_test: op = ast.Or else: op = ast.And - return ast.BoolOp(op, seq, expr_node.lineno, expr_node.column) + return ast.BoolOp(op, seq, expr_node.get_lineno(), expr_node.get_column()) elif expr_node_type == syms.not_test: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue - expr = self.handle_expr(expr_node.children[1]) - return ast.UnaryOp(ast.Not, expr, expr_node.lineno, - expr_node.column) + expr = self.handle_expr(expr_node.get_child(1)) + return ast.UnaryOp(ast.Not, expr, expr_node.get_lineno(), + expr_node.get_column()) elif expr_node_type == syms.comparison: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue operators = [] operands = [] - expr = self.handle_expr(expr_node.children[0]) - for i in range(1, len(expr_node.children), 2): - operators.append(self.handle_comp_op(expr_node.children[i])) - operands.append(self.handle_expr(expr_node.children[i + 1])) - return ast.Compare(expr, operators, 
operands, expr_node.lineno, - expr_node.column) + expr = self.handle_expr(expr_node.get_child(0)) + for i in range(1, expr_node.num_children(), 2): + operators.append(self.handle_comp_op(expr_node.get_child(i))) + operands.append(self.handle_expr(expr_node.get_child(i + 1))) + return ast.Compare(expr, operators, operands, expr_node.get_lineno(), + expr_node.get_column()) elif expr_node_type == syms.expr or \ expr_node_type == syms.xor_expr or \ expr_node_type == syms.and_expr or \ expr_node_type == syms.shift_expr or \ expr_node_type == syms.arith_expr or \ expr_node_type == syms.term: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue return self.handle_binop(expr_node) elif expr_node_type == syms.yield_expr: - if len(expr_node.children) == 2: - exp = self.handle_testlist(expr_node.children[1]) + if expr_node.num_children() == 2: + exp = self.handle_testlist(expr_node.get_child(1)) else: exp = None - return ast.Yield(exp, expr_node.lineno, expr_node.column) + return ast.Yield(exp, expr_node.get_lineno(), expr_node.get_column()) elif expr_node_type == syms.factor: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue return self.handle_factor(expr_node) elif expr_node_type == syms.power: @@ -789,24 +790,24 @@ raise AssertionError("unknown expr") def handle_lambdef(self, lambdef_node): - expr = self.handle_expr(lambdef_node.children[-1]) - if len(lambdef_node.children) == 3: + expr = self.handle_expr(lambdef_node.get_child(-1)) + if lambdef_node.num_children() == 3: args = ast.arguments(None, None, None, None) else: - args = self.handle_arguments(lambdef_node.children[1]) - return ast.Lambda(args, expr, lambdef_node.lineno, lambdef_node.column) + args = self.handle_arguments(lambdef_node.get_child(1)) + return ast.Lambda(args, expr, lambdef_node.get_lineno(), 
lambdef_node.get_column()) def handle_ifexp(self, if_expr_node): - body = self.handle_expr(if_expr_node.children[0]) - expression = self.handle_expr(if_expr_node.children[2]) - otherwise = self.handle_expr(if_expr_node.children[4]) - return ast.IfExp(expression, body, otherwise, if_expr_node.lineno, - if_expr_node.column) + body = self.handle_expr(if_expr_node.get_child(0)) + expression = self.handle_expr(if_expr_node.get_child(2)) + otherwise = self.handle_expr(if_expr_node.get_child(4)) + return ast.IfExp(expression, body, otherwise, if_expr_node.get_lineno(), + if_expr_node.get_column()) def handle_comp_op(self, comp_op_node): - comp_node = comp_op_node.children[0] + comp_node = comp_op_node.get_child(0) comp_type = comp_node.type - if len(comp_op_node.children) == 1: + if comp_op_node.num_children() == 1: if comp_type == tokens.LESS: return ast.Lt elif comp_type == tokens.GREATER: @@ -820,53 +821,55 @@ elif comp_type == tokens.NOTEQUAL: return ast.NotEq elif comp_type == tokens.NAME: - if comp_node.value == "is": + if comp_node.get_value() == "is": return ast.Is - elif comp_node.value == "in": + elif comp_node.get_value() == "in": return ast.In else: raise AssertionError("invalid comparison") else: raise AssertionError("invalid comparison") else: - if comp_op_node.children[1].value == "in": + if comp_op_node.get_child(1).get_value() == "in": return ast.NotIn - elif comp_node.value == "is": + elif comp_node.get_value() == "is": return ast.IsNot else: raise AssertionError("invalid comparison") def handle_binop(self, binop_node): - left = self.handle_expr(binop_node.children[0]) - right = self.handle_expr(binop_node.children[2]) - op = operator_map(binop_node.children[1].type) - result = ast.BinOp(left, op, right, binop_node.lineno, - binop_node.column) - number_of_ops = (len(binop_node.children) - 1) / 2 + left = self.handle_expr(binop_node.get_child(0)) + right = self.handle_expr(binop_node.get_child(2)) + op = operator_map(binop_node.get_child(1).type) + result 
= ast.BinOp(left, op, right, binop_node.get_lineno(), + binop_node.get_column()) + number_of_ops = (binop_node.num_children() - 1) / 2 for i in range(1, number_of_ops): - op_node = binop_node.children[i * 2 + 1] + op_node = binop_node.get_child(i * 2 + 1) op = operator_map(op_node.type) - sub_right = self.handle_expr(binop_node.children[i * 2 + 2]) - result = ast.BinOp(result, op, sub_right, op_node.lineno, - op_node.column) + sub_right = self.handle_expr(binop_node.get_child(i * 2 + 2)) + result = ast.BinOp(result, op, sub_right, op_node.get_lineno(), + op_node.get_column()) return result def handle_factor(self, factor_node): + from pypy.interpreter.pyparser.parser import Terminal # Fold '-' on constant numbers. - if factor_node.children[0].type == tokens.MINUS and \ - len(factor_node.children) == 2: - factor = factor_node.children[1] - if factor.type == syms.factor and len(factor.children) == 1: - power = factor.children[0] - if power.type == syms.power and len(power.children) == 1: - atom = power.children[0] + if factor_node.get_child(0).type == tokens.MINUS and \ + factor_node.num_children() == 2: + factor = factor_node.get_child(1) + if factor.type == syms.factor and factor.num_children() == 1: + power = factor.get_child(0) + if power.type == syms.power and power.num_children() == 1: + atom = power.get_child(0) if atom.type == syms.atom and \ - atom.children[0].type == tokens.NUMBER: - num = atom.children[0] - num.value = "-" + num.value + atom.get_child(0).type == tokens.NUMBER: + num = atom.get_child(0) + assert isinstance(num, Terminal) + num.value = "-" + num.get_value() return self.handle_atom(atom) - expr = self.handle_expr(factor_node.children[1]) - op_type = factor_node.children[0].type + expr = self.handle_expr(factor_node.get_child(1)) + op_type = factor_node.get_child(0).type if op_type == tokens.PLUS: op = ast.UAdd elif op_type == tokens.MINUS: @@ -875,31 +878,31 @@ op = ast.Invert else: raise AssertionError("invalid factor node") - return 
ast.UnaryOp(op, expr, factor_node.lineno, factor_node.column) + return ast.UnaryOp(op, expr, factor_node.get_lineno(), factor_node.get_column()) def handle_power(self, power_node): - atom_expr = self.handle_atom(power_node.children[0]) - if len(power_node.children) == 1: + atom_expr = self.handle_atom(power_node.get_child(0)) + if power_node.num_children() == 1: return atom_expr - for i in range(1, len(power_node.children)): - trailer = power_node.children[i] + for i in range(1, power_node.num_children()): + trailer = power_node.get_child(i) if trailer.type != syms.trailer: break tmp_atom_expr = self.handle_trailer(trailer, atom_expr) tmp_atom_expr.lineno = atom_expr.lineno tmp_atom_expr.col_offset = atom_expr.col_offset atom_expr = tmp_atom_expr - if power_node.children[-1].type == syms.factor: - right = self.handle_expr(power_node.children[-1]) - atom_expr = ast.BinOp(atom_expr, ast.Pow, right, power_node.lineno, - power_node.column) + if power_node.get_child(-1).type == syms.factor: + right = self.handle_expr(power_node.get_child(-1)) + atom_expr = ast.BinOp(atom_expr, ast.Pow, right, power_node.get_lineno(), + power_node.get_column()) return atom_expr def handle_slice(self, slice_node): - first_child = slice_node.children[0] + first_child = slice_node.get_child(0) if first_child.type == tokens.DOT: return ast.Ellipsis() - if len(slice_node.children) == 1 and first_child.type == syms.test: + if slice_node.num_children() == 1 and first_child.type == syms.test: index = self.handle_expr(first_child) return ast.Index(index) lower = None @@ -908,71 +911,72 @@ if first_child.type == syms.test: lower = self.handle_expr(first_child) if first_child.type == tokens.COLON: - if len(slice_node.children) > 1: - second_child = slice_node.children[1] + if slice_node.num_children() > 1: + second_child = slice_node.get_child(1) if second_child.type == syms.test: upper = self.handle_expr(second_child) - elif len(slice_node.children) > 2: - third_child = slice_node.children[2] + 
elif slice_node.num_children() > 2: + third_child = slice_node.get_child(2) if third_child.type == syms.test: upper = self.handle_expr(third_child) - last_child = slice_node.children[-1] + last_child = slice_node.get_child(-1) if last_child.type == syms.sliceop: - if len(last_child.children) == 1: - step = ast.Name("None", ast.Load, last_child.lineno, - last_child.column) + if last_child.num_children() == 1: + step = ast.Name("None", ast.Load, last_child.get_lineno(), + last_child.get_column()) else: - step_child = last_child.children[1] + step_child = last_child.get_child(1) if step_child.type == syms.test: step = self.handle_expr(step_child) return ast.Slice(lower, upper, step) def handle_trailer(self, trailer_node, left_expr): - first_child = trailer_node.children[0] + first_child = trailer_node.get_child(0) if first_child.type == tokens.LPAR: - if len(trailer_node.children) == 2: + if trailer_node.num_children() == 2: return ast.Call(left_expr, None, None, None, None, - trailer_node.lineno, trailer_node.column) + trailer_node.get_lineno(), trailer_node.get_column()) else: - return self.handle_call(trailer_node.children[1], left_expr) + return self.handle_call(trailer_node.get_child(1), left_expr) elif first_child.type == tokens.DOT: - attr = trailer_node.children[1].value + attr = trailer_node.get_child(1).get_value() return ast.Attribute(left_expr, attr, ast.Load, - trailer_node.lineno, trailer_node.column) + trailer_node.get_lineno(), trailer_node.get_column()) else: - middle = trailer_node.children[1] - if len(middle.children) == 1: - slice = self.handle_slice(middle.children[0]) + middle = trailer_node.get_child(1) + if middle.num_children() == 1: + slice = self.handle_slice(middle.get_child(0)) return ast.Subscript(left_expr, slice, ast.Load, - middle.lineno, middle.column) + middle.get_lineno(), middle.get_column()) slices = [] simple = True - for i in range(0, len(middle.children), 2): - slc = self.handle_slice(middle.children[i]) + for i in range(0, 
middle.num_children(), 2): + slc = self.handle_slice(middle.get_child(i)) if not isinstance(slc, ast.Index): simple = False slices.append(slc) if not simple: ext_slice = ast.ExtSlice(slices) return ast.Subscript(left_expr, ext_slice, ast.Load, - middle.lineno, middle.column) + middle.get_lineno(), middle.get_column()) elts = [] for idx in slices: assert isinstance(idx, ast.Index) elts.append(idx.value) - tup = ast.Tuple(elts, ast.Load, middle.lineno, middle.column) + tup = ast.Tuple(elts, ast.Load, middle.get_lineno(), middle.get_column()) return ast.Subscript(left_expr, ast.Index(tup), ast.Load, - middle.lineno, middle.column) + middle.get_lineno(), middle.get_column()) def handle_call(self, args_node, callable_expr): arg_count = 0 keyword_count = 0 generator_count = 0 - for argument in args_node.children: + for i in range(args_node.num_children()): + argument = args_node.get_child(i) if argument.type == syms.argument: - if len(argument.children) == 1: + if argument.num_children() == 1: arg_count += 1 - elif argument.children[1].type == syms.comp_for: + elif argument.get_child(1).type == syms.comp_for: generator_count += 1 else: keyword_count += 1 @@ -987,13 +991,13 @@ used_keywords = {} variable_arg = None keywords_arg = None - child_count = len(args_node.children) + child_count = args_node.num_children() i = 0 while i < child_count: - argument = args_node.children[i] + argument = args_node.get_child(i) if argument.type == syms.argument: - if len(argument.children) == 1: - expr_node = argument.children[0] + if argument.num_children() == 1: + expr_node = argument.get_child(0) if keywords: self.error("non-keyword arg after keyword arg", expr_node) @@ -1001,10 +1005,10 @@ self.error("only named arguments may follow " "*expression", expr_node) args.append(self.handle_expr(expr_node)) - elif argument.children[1].type == syms.comp_for: + elif argument.get_child(1).type == syms.comp_for: args.append(self.handle_genexp(argument)) else: - keyword_node = 
argument.children[0] + keyword_node = argument.get_child(0) keyword_expr = self.handle_expr(keyword_node) if isinstance(keyword_expr, ast.Lambda): self.error("lambda cannot contain assignment", @@ -1017,13 +1021,13 @@ self.error("keyword argument repeated", keyword_node) used_keywords[keyword] = None self.check_forbidden_name(keyword, keyword_node) - keyword_value = self.handle_expr(argument.children[2]) + keyword_value = self.handle_expr(argument.get_child(2)) keywords.append(ast.keyword(keyword, keyword_value)) elif argument.type == tokens.STAR: - variable_arg = self.handle_expr(args_node.children[i + 1]) + variable_arg = self.handle_expr(args_node.get_child(i + 1)) i += 1 elif argument.type == tokens.DOUBLESTAR: - keywords_arg = self.handle_expr(args_node.children[i + 1]) + keywords_arg = self.handle_expr(args_node.get_child(i + 1)) i += 1 i += 1 if not args: @@ -1082,20 +1086,20 @@ return self.space.call_function(self.space.w_float, w_num_str) def handle_atom(self, atom_node): - first_child = atom_node.children[0] + first_child = atom_node.get_child(0) first_child_type = first_child.type if first_child_type == tokens.NAME: - return ast.Name(first_child.value, ast.Load, - first_child.lineno, first_child.column) + return ast.Name(first_child.get_value(), ast.Load, + first_child.get_lineno(), first_child.get_column()) elif first_child_type == tokens.STRING: space = self.space encoding = self.compile_info.encoding flags = self.compile_info.flags unicode_literals = flags & consts.CO_FUTURE_UNICODE_LITERALS try: - sub_strings_w = [parsestring.parsestr(space, encoding, s.value, + sub_strings_w = [parsestring.parsestr(space, encoding, atom_node.get_child(i).get_value(), unicode_literals) - for s in atom_node.children] + for i in range(atom_node.num_children())] except error.OperationError, e: if not e.match(space, space.w_UnicodeError): raise @@ -1109,59 +1113,59 @@ final_string = space.call_function(w_join, w_sub_strings) else: final_string = sub_strings_w[0] - return 
ast.Str(final_string, atom_node.lineno, atom_node.column) + return ast.Str(final_string, atom_node.get_lineno(), atom_node.get_column()) elif first_child_type == tokens.NUMBER: - num_value = self.parse_number(first_child.value) - return ast.Num(num_value, atom_node.lineno, atom_node.column) + num_value = self.parse_number(first_child.get_value()) + return ast.Num(num_value, atom_node.get_lineno(), atom_node.get_column()) elif first_child_type == tokens.LPAR: - second_child = atom_node.children[1] + second_child = atom_node.get_child(1) if second_child.type == tokens.RPAR: - return ast.Tuple(None, ast.Load, atom_node.lineno, - atom_node.column) + return ast.Tuple(None, ast.Load, atom_node.get_lineno(), + atom_node.get_column()) elif second_child.type == syms.yield_expr: return self.handle_expr(second_child) return self.handle_testlist_gexp(second_child) elif first_child_type == tokens.LSQB: - second_child = atom_node.children[1] + second_child = atom_node.get_child(1) if second_child.type == tokens.RSQB: - return ast.List(None, ast.Load, atom_node.lineno, - atom_node.column) - if len(second_child.children) == 1 or \ - second_child.children[1].type == tokens.COMMA: + return ast.List(None, ast.Load, atom_node.get_lineno(), + atom_node.get_column()) + if second_child.num_children() == 1 or \ + second_child.get_child(1).type == tokens.COMMA: elts = self.get_expression_list(second_child) - return ast.List(elts, ast.Load, atom_node.lineno, - atom_node.column) + return ast.List(elts, ast.Load, atom_node.get_lineno(), + atom_node.get_column()) return self.handle_listcomp(second_child) elif first_child_type == tokens.LBRACE: - maker = atom_node.children[1] + maker = atom_node.get_child(1) if maker.type == tokens.RBRACE: - return ast.Dict(None, None, atom_node.lineno, atom_node.column) - n_maker_children = len(maker.children) - if n_maker_children == 1 or maker.children[1].type == tokens.COMMA: + return ast.Dict(None, None, atom_node.get_lineno(), atom_node.get_column()) + 
n_maker_children = maker.num_children() + if n_maker_children == 1 or maker.get_child(1).type == tokens.COMMA: elts = [] for i in range(0, n_maker_children, 2): - elts.append(self.handle_expr(maker.children[i])) - return ast.Set(elts, atom_node.lineno, atom_node.column) - if maker.children[1].type == syms.comp_for: + elts.append(self.handle_expr(maker.get_child(i))) + return ast.Set(elts, atom_node.get_lineno(), atom_node.get_column()) + if maker.get_child(1).type == syms.comp_for: return self.handle_setcomp(maker) if (n_maker_children > 3 and - maker.children[3].type == syms.comp_for): + maker.get_child(3).type == syms.comp_for): return self.handle_dictcomp(maker) keys = [] values = [] for i in range(0, n_maker_children, 4): - keys.append(self.handle_expr(maker.children[i])) - values.append(self.handle_expr(maker.children[i + 2])) - return ast.Dict(keys, values, atom_node.lineno, atom_node.column) + keys.append(self.handle_expr(maker.get_child(i))) + values.append(self.handle_expr(maker.get_child(i + 2))) + return ast.Dict(keys, values, atom_node.get_lineno(), atom_node.get_column()) elif first_child_type == tokens.BACKQUOTE: - expr = self.handle_testlist(atom_node.children[1]) - return ast.Repr(expr, atom_node.lineno, atom_node.column) + expr = self.handle_testlist(atom_node.get_child(1)) + return ast.Repr(expr, atom_node.get_lineno(), atom_node.get_column()) else: raise AssertionError("unknown atom") def handle_testlist_gexp(self, gexp_node): - if len(gexp_node.children) > 1 and \ - gexp_node.children[1].type == syms.comp_for: + if gexp_node.num_children() > 1 and \ + gexp_node.get_child(1).type == syms.comp_for: return self.handle_genexp(gexp_node) return self.handle_testlist(gexp_node) @@ -1170,18 +1174,18 @@ current_for = comp_node while True: count += 1 - if len(current_for.children) == 5: - current_iter = current_for.children[4] + if current_for.num_children() == 5: + current_iter = current_for.get_child(4) else: return count while True: - first_child = 
current_iter.children[0] + first_child = current_iter.get_child(0) if first_child.type == for_type: - current_for = current_iter.children[0] + current_for = current_iter.get_child(0) break elif first_child.type == if_type: - if len(first_child.children) == 3: - current_iter = first_child.children[2] + if first_child.num_children() == 3: + current_iter = first_child.get_child(2) else: return count else: @@ -1190,13 +1194,13 @@ def count_comp_ifs(self, iter_node, for_type): count = 0 while True: - first_child = iter_node.children[0] + first_child = iter_node.get_child(0) if first_child.type == for_type: return count count += 1 - if len(first_child.children) == 2: + if first_child.num_children() == 2: return count - iter_node = first_child.children[2] + iter_node = first_child.get_child(2) @specialize.arg(2) def comprehension_helper(self, comp_node, @@ -1208,15 +1212,15 @@ fors_count = self.count_comp_fors(comp_node, for_type, if_type) comps = [] for i in range(fors_count): - for_node = comp_node.children[1] + for_node = comp_node.get_child(1) for_targets = self.handle_exprlist(for_node, ast.Store) - expr = handle_source_expression(comp_node.children[3]) + expr = handle_source_expression(comp_node.get_child(3)) assert isinstance(expr, ast.expr) - if len(for_node.children) == 1: + if for_node.num_children() == 1: comp = ast.comprehension(for_targets[0], expr, None) else: - col = comp_node.column - line = comp_node.lineno + col = comp_node.get_column() + line = comp_node.get_lineno() # Modified in python2.7, see http://bugs.python.org/issue6704 if comp_fix_unamed_tuple_location: expr_node = for_targets[0] @@ -1225,59 +1229,59 @@ line = expr_node.lineno target = ast.Tuple(for_targets, ast.Store, line, col) comp = ast.comprehension(target, expr, None) - if len(comp_node.children) == 5: - comp_node = comp_iter = comp_node.children[4] + if comp_node.num_children() == 5: + comp_node = comp_iter = comp_node.get_child(4) assert comp_iter.type == iter_type ifs_count = 
self.count_comp_ifs(comp_iter, for_type) if ifs_count: ifs = [] for j in range(ifs_count): - comp_node = comp_if = comp_iter.children[0] - ifs.append(self.handle_expr(comp_if.children[1])) - if len(comp_if.children) == 3: - comp_node = comp_iter = comp_if.children[2] + comp_node = comp_if = comp_iter.get_child(0) + ifs.append(self.handle_expr(comp_if.get_child(1))) + if comp_if.num_children() == 3: + comp_node = comp_iter = comp_if.get_child(2) comp.ifs = ifs if comp_node.type == iter_type: - comp_node = comp_node.children[0] + comp_node = comp_node.get_child(0) assert isinstance(comp, ast.comprehension) comps.append(comp) return comps def handle_genexp(self, genexp_node): - elt = self.handle_expr(genexp_node.children[0]) - comps = self.comprehension_helper(genexp_node.children[1], + elt = self.handle_expr(genexp_node.get_child(0)) + comps = self.comprehension_helper(genexp_node.get_child(1), comp_fix_unamed_tuple_location=True) - return ast.GeneratorExp(elt, comps, genexp_node.lineno, - genexp_node.column) + return ast.GeneratorExp(elt, comps, genexp_node.get_lineno(), + genexp_node.get_column()) def handle_listcomp(self, listcomp_node): - elt = self.handle_expr(listcomp_node.children[0]) - comps = self.comprehension_helper(listcomp_node.children[1], + elt = self.handle_expr(listcomp_node.get_child(0)) + comps = self.comprehension_helper(listcomp_node.get_child(1), "handle_testlist", syms.list_for, syms.list_if, syms.list_iter, comp_fix_unamed_tuple_location=True) - return ast.ListComp(elt, comps, listcomp_node.lineno, - listcomp_node.column) + return ast.ListComp(elt, comps, listcomp_node.get_lineno(), + listcomp_node.get_column()) def handle_setcomp(self, set_maker): - elt = self.handle_expr(set_maker.children[0]) - comps = self.comprehension_helper(set_maker.children[1], + elt = self.handle_expr(set_maker.get_child(0)) + comps = self.comprehension_helper(set_maker.get_child(1), comp_fix_unamed_tuple_location=True) - return ast.SetComp(elt, comps, 
set_maker.lineno, set_maker.column) + return ast.SetComp(elt, comps, set_maker.get_lineno(), set_maker.get_column()) def handle_dictcomp(self, dict_maker): - key = self.handle_expr(dict_maker.children[0]) - value = self.handle_expr(dict_maker.children[2]) - comps = self.comprehension_helper(dict_maker.children[3], + key = self.handle_expr(dict_maker.get_child(0)) + value = self.handle_expr(dict_maker.get_child(2)) + comps = self.comprehension_helper(dict_maker.get_child(3), comp_fix_unamed_tuple_location=True) - return ast.DictComp(key, value, comps, dict_maker.lineno, - dict_maker.column) + return ast.DictComp(key, value, comps, dict_maker.get_lineno(), + dict_maker.get_column()) def handle_exprlist(self, exprlist, context): exprs = [] - for i in range(0, len(exprlist.children), 2): - child = exprlist.children[i] + for i in range(0, exprlist.num_children(), 2): + child = exprlist.get_child(i) expr = self.handle_expr(child) self.set_context(expr, context) exprs.append(expr) diff --git a/pypy/interpreter/pyparser/parser.py b/pypy/interpreter/pyparser/parser.py --- a/pypy/interpreter/pyparser/parser.py +++ b/pypy/interpreter/pyparser/parser.py @@ -44,27 +44,86 @@ class Node(object): - __slots__ = "type value children lineno column".split() + __slots__ = "type lineno column".split() - def __init__(self, type, value, children, lineno, column): + def __init__(self, type, lineno, column): self.type = type - self.value = value - self.children = children self.lineno = lineno self.column = column def __eq__(self, other): - # For tests. 
- return (self.type == other.type and - self.value == other.value and - self.children == other.children) + raise NotImplementedError("abstract base class") + + def __ne__(self, other): + return not self == other + + def get_value(self): + return None + + def get_child(self, i): + raise NotImplementedError("abstract base class") + + def num_children(self): + return 0 + + def append_child(self, child): + raise NotImplementedError("abstract base class") + + def get_lineno(self): + return self.lineno + + def get_column(self): + return self.column + + +class Terminal(Node): + __slots__ = ("value", ) + def __init__(self, type, value, lineno, column): + Node.__init__(self, type, lineno, column) + self.value = value def __repr__(self): - if self.value is None: - return "Node(type=%s, children=%r)" % (self.type, self.children) - else: - return "Node(type=%s, value=%r)" % (self.type, self.value) + return "Terminal(type=%s, value=%r)" % (self.type, self.value) + def __eq__(self, other): + # For tests. + return (type(self) == type(other) and + self.type == other.type and + self.value == other.value) + + def get_value(self): + return self.value + + +class AbstractNonterminal(Node): + pass + +class Nonterminal(AbstractNonterminal): + __slots__ = ("_children", ) + def __init__(self, type, children, lineno, column): + Node.__init__(self, type, lineno, column) + self._children = children + + def __eq__(self, other): + # For tests. 
+ return (type(self) == type(other) and + self.type == other.type and + self._children == other._children) + + def __repr__(self): + return "Nonterminal(type=%s, children=%r)" % (self.type, self._children) + + def get_child(self, i): + return self._children[i] + + def num_children(self): + return len(self._children) + + def append_child(self, child): + self._children.append(child) + if not self._children: + assert self.lineno == child.lineno + assert self.column == child.column class ParseError(Exception): @@ -97,7 +156,7 @@ if start == -1: start = self.grammar.start self.root = None - current_node = Node(start, None, [], 0, 0) + current_node = Nonterminal(start, [], 0, 0) self.stack = [] self.stack.append((self.grammar.dfas[start - 256], 0, current_node)) @@ -164,14 +223,14 @@ def shift(self, next_state, token_type, value, lineno, column): """Shift a non-terminal and prepare for the next state.""" dfa, state, node = self.stack[-1] - new_node = Node(token_type, value, None, lineno, column) - node.children.append(new_node) + new_node = Terminal(token_type, value, lineno, column) + node.append_child(new_node) self.stack[-1] = (dfa, next_state, node) def push(self, next_dfa, next_state, node_type, lineno, column): """Push a terminal and adjust the current state.""" dfa, state, node = self.stack[-1] - new_node = Node(node_type, None, [], lineno, column) + new_node = Nonterminal(node_type, [], lineno, column) self.stack[-1] = (dfa, next_state, node) self.stack.append((next_dfa, 0, new_node)) @@ -179,6 +238,6 @@ """Pop an entry off the stack and make its node a child of the last.""" dfa, state, node = self.stack.pop() if self.stack: - self.stack[-1][2].children.append(node) + self.stack[-1][2].append_child(node) else: self.root = node diff --git a/pypy/interpreter/pyparser/pygram.py b/pypy/interpreter/pyparser/pygram.py --- a/pypy/interpreter/pyparser/pygram.py +++ b/pypy/interpreter/pyparser/pygram.py @@ -31,8 +31,11 @@ class _Symbols(object): pass +rev_lookup = {} for 
sym_name, idx in python_grammar.symbol_ids.iteritems(): setattr(_Symbols, sym_name, idx) + rev_lookup[idx] = sym_name syms = _Symbols() +syms._rev_lookup = rev_lookup # for debugging del _get_python_grammar, _Tokens, tok_name, sym_name, idx diff --git a/pypy/interpreter/pyparser/test/test_parser.py b/pypy/interpreter/pyparser/test/test_parser.py --- a/pypy/interpreter/pyparser/test/test_parser.py +++ b/pypy/interpreter/pyparser/test/test_parser.py @@ -52,24 +52,23 @@ value = "\n" else: value = "" - children = None + n = parser.Terminal(tp, value, 0, 0) else: tp = gram.symbol_ids[data[0]] - value = None children = [] - n = parser.Node(tp, value, children, 0, 0) + n = parser.Nonterminal(tp, children, 0, 0) new_indent = count_indent(line) if new_indent >= last_indent: if new_indent == last_indent and node_stack: node_stack.pop() if node_stack: - node_stack[-1].children.append(n) + node_stack[-1].append_child(n) node_stack.append(n) else: diff = last_indent - new_indent pop_nodes = diff // 4 + 1 del node_stack[-pop_nodes:] - node_stack[-1].children.append(n) + node_stack[-1].append_child(n) node_stack.append(n) last_indent = new_indent return node_stack[0] From pypy.commits at gmail.com Tue Apr 19 03:45:33 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 19 Apr 2016 00:45:33 -0700 (PDT) Subject: [pypy-commit] pypy default: don't store line and column on Nonterminal, introduce a special one-child Message-ID: <5715e21d.46291c0a.dbffa.ffffd87d@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r83759:e35996d1c1b6 Date: 2016-04-19 10:43 +0300 http://bitbucket.org/pypy/pypy/changeset/e35996d1c1b6/ Log: don't store line and column on Nonterminal, introduce a special one- child Nonterminal1 for all the intermediate nodes that does not need a list diff --git a/pypy/interpreter/pyparser/parser.py b/pypy/interpreter/pyparser/parser.py --- a/pypy/interpreter/pyparser/parser.py +++ b/pypy/interpreter/pyparser/parser.py @@ -44,12 +44,10 @@ class Node(object): - 
__slots__ = "type lineno column".split() + __slots__ = ("type", ) - def __init__(self, type, lineno, column): + def __init__(self, type): self.type = type - self.lineno = lineno - self.column = column def __eq__(self, other): raise NotImplementedError("abstract base class") @@ -70,17 +68,19 @@ raise NotImplementedError("abstract base class") def get_lineno(self): - return self.lineno + raise NotImplementedError("abstract base class") def get_column(self): - return self.column + raise NotImplementedError("abstract base class") class Terminal(Node): - __slots__ = ("value", ) + __slots__ = ("value", "lineno", "column") def __init__(self, type, value, lineno, column): - Node.__init__(self, type, lineno, column) + Node.__init__(self, type) self.value = value + self.lineno = lineno + self.column = column def __repr__(self): return "Terminal(type=%s, value=%r)" % (self.type, self.value) @@ -94,22 +94,43 @@ def get_value(self): return self.value + def get_lineno(self): + return self.lineno + + def get_column(self): + return self.column + class AbstractNonterminal(Node): - pass + __slots__ = () + + def get_lineno(self): + return self.get_child(0).get_lineno() + + def get_column(self): + return self.get_child(0).get_column() + + def __eq__(self, other): + # For tests. + # grumble, annoying + if not isinstance(other, AbstractNonterminal): + return False + if self.type != other.type: + return False + if self.num_children() != other.num_children(): + return False + for i in range(self.num_children()): + if self.get_child(i) != other.get_child(i): + return False + return True + class Nonterminal(AbstractNonterminal): __slots__ = ("_children", ) - def __init__(self, type, children, lineno, column): - Node.__init__(self, type, lineno, column) + def __init__(self, type, children): + Node.__init__(self, type) self._children = children - def __eq__(self, other): - # For tests. 
- return (type(self) == type(other) and - self.type == other.type and - self._children == other._children) - def __repr__(self): return "Nonterminal(type=%s, children=%r)" % (self.type, self._children) @@ -121,9 +142,28 @@ def append_child(self, child): self._children.append(child) - if not self._children: - assert self.lineno == child.lineno - assert self.column == child.column + + +class Nonterminal1(AbstractNonterminal): + __slots__ = ("_child", ) + def __init__(self, type, child): + Node.__init__(self, type) + self._child = child + + def __repr__(self): + return "Nonterminal(type=%s, children=[%r])" % (self.type, self._child) + + def get_child(self, i): + assert i == 0 or i == -1 + return self._child + + def num_children(self): + return 1 + + def append_child(self, child): + assert 0, "should be unreachable" + + class ParseError(Exception): @@ -156,7 +196,7 @@ if start == -1: start = self.grammar.start self.root = None - current_node = Nonterminal(start, [], 0, 0) + current_node = Nonterminal(start, []) self.stack = [] self.stack.append((self.grammar.dfas[start - 256], 0, current_node)) @@ -230,7 +270,7 @@ def push(self, next_dfa, next_state, node_type, lineno, column): """Push a terminal and adjust the current state.""" dfa, state, node = self.stack[-1] - new_node = Nonterminal(node_type, [], lineno, column) + new_node = Nonterminal(node_type, []) self.stack[-1] = (dfa, next_state, node) self.stack.append((next_dfa, 0, new_node)) @@ -238,6 +278,10 @@ """Pop an entry off the stack and make its node a child of the last.""" dfa, state, node = self.stack.pop() if self.stack: + # we are now done with node, so we can store it more efficiently if + # it has just one child + if node.num_children() == 1: + node = Nonterminal1(node.type, node.get_child(0)) self.stack[-1][2].append_child(node) else: self.root = node diff --git a/pypy/interpreter/pyparser/test/test_parser.py b/pypy/interpreter/pyparser/test/test_parser.py --- 
a/pypy/interpreter/pyparser/test/test_parser.py +++ b/pypy/interpreter/pyparser/test/test_parser.py @@ -56,7 +56,7 @@ else: tp = gram.symbol_ids[data[0]] children = [] - n = parser.Nonterminal(tp, children, 0, 0) + n = parser.Nonterminal(tp, children) new_indent = count_indent(line) if new_indent >= last_indent: if new_indent == last_indent and node_stack: diff --git a/pypy/module/parser/pyparser.py b/pypy/module/parser/pyparser.py --- a/pypy/module/parser/pyparser.py +++ b/pypy/module/parser/pyparser.py @@ -15,21 +15,21 @@ @specialize.arg(3) def _build_app_tree(self, space, node, seq_maker, with_lineno, with_column): - if node.children is not None: - seq_w = [None]*(len(node.children) + 1) + if node.num_children(): + seq_w = [None]*(node.num_children() + 1) seq_w[0] = space.wrap(node.type) - for i in range(1, len(node.children) + 1): - seq_w[i] = self._build_app_tree(space, node.children[i - 1], + for i in range(1, node.num_children() + 1): + seq_w[i] = self._build_app_tree(space, node.get_child(i - 1), seq_maker, with_lineno, with_column) else: seq_w = [None]*(2 + with_lineno + with_column) seq_w[0] = space.wrap(node.type) - seq_w[1] = space.wrap(node.value) + seq_w[1] = space.wrap(node.get_value()) if with_lineno: - seq_w[2] = space.wrap(node.lineno) + seq_w[2] = space.wrap(node.get_lineno()) if with_column: - seq_w[3] = space.wrap(node.column) + seq_w[3] = space.wrap(node.get_column()) return seq_maker(seq_w) def descr_issuite(self, space): From pypy.commits at gmail.com Tue Apr 19 03:57:15 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 19 Apr 2016 00:57:15 -0700 (PDT) Subject: [pypy-commit] pypy default: print the progress upon test failure (test_gil), increased the time it sleeps while checking the progress Message-ID: <5715e4db.455ec20a.30b4.2fdc@mx.google.com> Author: Richard Plangger Branch: Changeset: r83761:aacade14ae52 Date: 2016-04-19 09:55 +0200 http://bitbucket.org/pypy/pypy/changeset/aacade14ae52/ Log: print the progress upon test 
failure (test_gil), increased the time it sleeps while checking the progress diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -1,5 +1,7 @@ import time from pypy.module.thread import gil +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.lltypesystem import lltype from rpython.rlib import rgil from rpython.rlib.test import test_rthread from rpython.rlib import rthread as thread @@ -81,10 +83,13 @@ while len(state.data) < 2*N: debug_print(len(state.data)) if not still_waiting: + llop.debug_print(lltype.Void, "timeout. progress: " + "%d of 2*N (= %f%%)" % \ + (len(state.data), 2*N, 100*len(state.data)/(2.0*N))) raise ValueError("time out") still_waiting -= 1 if not we_are_translated(): rgil.release() - time.sleep(0.01) + time.sleep(0.1) if not we_are_translated(): rgil.acquire() debug_print("leaving!") i1 = i2 = 0 From pypy.commits at gmail.com Tue Apr 19 03:57:14 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 19 Apr 2016 00:57:14 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: forgot to set get_location to pypy's main jitdriver Message-ID: <5715e4da.c31f1c0a.b4d83.ffffd756@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83760:eff88c49b7e9 Date: 2016-04-18 20:31 +0200 http://bitbucket.org/pypy/pypy/changeset/eff88c49b7e9/ Log: forgot to set get_location to pypy's main jitdriver diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -43,13 +43,18 @@ def get_location(next_instr, is_being_profiled, bytecode): from pypy.tool.stdlib_opcode import opcode_method_names - name = opcode_method_names[ord(bytecode.co_code[next_instr])] + opname = opcode_method_names[ord(bytecode.co_code[next_instr])] + if not opname: + opname = "" + name = bytecode.co_name + if not name: + name = 
"" # we can probably do better at co_firstlineno? return (bytecode.co_filename, bytecode.co_firstlineno, - bytecode.co_name, - next_instr, - name) + name, + intmask(next_instr), + opname) def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 @@ -60,6 +65,7 @@ virtualizables = ['frame'] pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, + get_location = get_location, get_unique_id = get_unique_id, should_unroll_one_iteration = should_unroll_one_iteration, diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -681,9 +681,9 @@ # get_location new API get_location_ptr = self.jitdriver_sd._get_location_ptr if get_location_ptr is None: - missing = '(%s: no get_location)' % drivername + missing_get_loc = '(%s: no get_location)' % drivername def get_location(greenkey): - return (missing, 0, '', 0, '') + return (missing_get_loc, 0, '', 0, '') else: unwrap_greenkey = self.make_unwrap_greenkey() def get_location(greenkey): diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -899,6 +899,7 @@ driver = self.instance.im_self h = self.annotate_hook h(driver.get_printable_location, driver.greens, **kwds_s) + h(driver.get_location, driver.greens, **kwds_s) def annotate_hook(self, func, variables, args_s=[], **kwds_s): if func is None: From pypy.commits at gmail.com Tue Apr 19 04:20:35 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Apr 2016 01:20:35 -0700 (PDT) Subject: [pypy-commit] cffi default: Support help(lib.foo) Message-ID: <5715ea53.432f1c0a.3cc2c.ffffea4d@mx.google.com> Author: Armin Rigo Branch: Changeset: r2669:eeef3869b994 Date: 2016-04-19 10:21 +0200 http://bitbucket.org/cffi/cffi/changeset/eeef3869b994/ Log: Support help(lib.foo) diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- 
a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4659,7 +4659,8 @@ #undef ALIGN_ARG -static void fb_cat_name(struct funcbuilder_s *fb, char *piece, int piecelen) +static void fb_cat_name(struct funcbuilder_s *fb, const char *piece, + int piecelen) { if (fb->bufferp == NULL) { fb->nb_bytes += piecelen; @@ -4670,10 +4671,11 @@ } } -static int fb_build_name(struct funcbuilder_s *fb, PyObject *fargs, - CTypeDescrObject *fresult, int ellipsis, int fabi) -{ - Py_ssize_t i, nargs = PyTuple_GET_SIZE(fargs); +static int fb_build_name(struct funcbuilder_s *fb, const char *repl, + CTypeDescrObject **pfargs, Py_ssize_t nargs, + CTypeDescrObject *fresult, int ellipsis) +{ + Py_ssize_t i; fb->nargs = nargs; /* name: the function type name we build here is, like in C, made @@ -4682,25 +4684,22 @@ RESULT_TYPE_HEAD (*)(ARG_1_TYPE, ARG_2_TYPE, etc) RESULT_TYPE_TAIL */ fb_cat_name(fb, fresult->ct_name, fresult->ct_name_position); + if (repl[0] != '(' && + fresult->ct_name[fresult->ct_name_position - 1] != '*') + fb_cat_name(fb, " ", 1); /* add a space */ + fb_cat_name(fb, repl, strlen(repl)); + if (fb->fct) { + i = strlen(repl) - 1; /* between '(*' and ')' */ + assert(repl[i] == ')'); + fb->fct->ct_name_position = fresult->ct_name_position + i; + } fb_cat_name(fb, "(", 1); - i = 2; -#if defined(MS_WIN32) && !defined(_WIN64) - if (fabi == FFI_STDCALL) { - fb_cat_name(fb, "__stdcall ", 10); - i += 10; - } -#endif - fb_cat_name(fb, "*)(", 3); - if (fb->fct) { - i = fresult->ct_name_position + i; /* between '(*' and ')(' */ - fb->fct->ct_name_position = i; - } /* loop over the arguments */ for (i=0; inb_bytes = 0; fb->bufferp = NULL; fb->fct = NULL; + pfargs = (CTypeDescrObject **)&PyTuple_GET_ITEM(fargs, 0); + nargs = PyTuple_GET_SIZE(fargs); +#if defined(MS_WIN32) && !defined(_WIN64) + if (fabi == FFI_STDCALL) + repl = "(__stdcall *)"; +#endif + /* compute the total size needed for the name */ - if (fb_build_name(fb, fargs, fresult, ellipsis, fabi) < 0) + if (fb_build_name(fb, repl, 
pfargs, nargs, fresult, ellipsis) < 0) return NULL; /* allocate the function type */ @@ -4748,7 +4756,7 @@ /* call again fb_build_name() to really build the ct_name */ fb->bufferp = fct->ct_name; - if (fb_build_name(fb, fargs, fresult, ellipsis, fabi) < 0) + if (fb_build_name(fb, repl, pfargs, nargs, fresult, ellipsis) < 0) goto error; assert(fb->bufferp == fct->ct_name + fb->nb_bytes); diff --git a/c/lib_obj.c b/c/lib_obj.c --- a/c/lib_obj.c +++ b/c/lib_obj.c @@ -3,8 +3,12 @@ module originally created by recompile(). A Lib object is special in the sense that it has a custom - __getattr__ which returns C globals, functions and constants. It - raises AttributeError for anything else, even attrs like '__class__'. + __getattr__ which returns C globals, functions and constants. The + original idea was to raise AttributeError for anything else, even + attrs like '__class__', but it breaks various things; now, standard + attrs are returned, but in the unlikely case where a user cdef()s + the same name, then the standard attr is hidden (and the various + things like introspection might break). 
A Lib object has got a reference to the _cffi_type_context_s structure, which is used to create lazily the objects returned by @@ -15,9 +19,8 @@ PyMethodDef md; void *direct_fn; int type_index; + char doc[1]; }; -static const char cpyextfunc_doc[] = - "direct call to the C function of the same name"; struct LibObject_s { PyObject_HEAD @@ -30,18 +33,22 @@ static struct CPyExtFunc_s *_cpyextfunc_get(PyObject *x) { - struct CPyExtFunc_s *exf; + PyObject *y; + LibObject *lo; + PyCFunctionObject *fo; if (!PyCFunction_Check(x)) return NULL; - if (!LibObject_Check(PyCFunction_GET_SELF(x))) + y = PyCFunction_GET_SELF(x); + if (!LibObject_Check(y)) return NULL; - exf = (struct CPyExtFunc_s *)(((PyCFunctionObject *)x) -> m_ml); - if (exf->md.ml_doc != cpyextfunc_doc) + fo = (PyCFunctionObject *)x; + lo = (LibObject *)y; + if (lo->l_libname != fo->m_module) return NULL; - return exf; + return (struct CPyExtFunc_s *)(fo->m_ml); } static PyObject *_cpyextfunc_type(LibObject *lib, struct CPyExtFunc_s *exf) @@ -111,56 +118,82 @@ built. The C extension code can then assume that they are, by calling _cffi_type(). */ - CTypeDescrObject *ct; + PyObject *result = NULL; + CTypeDescrObject **pfargs; + CTypeDescrObject *fresult; + Py_ssize_t nargs = 0; struct CPyExtFunc_s *xfunc; int i, type_index = _CFFI_GETARG(g->type_op); _cffi_opcode_t *opcodes = lib->l_types_builder->ctx.types; + static const char *const format = ";\n\nCFFI C function from %s.lib"; + char *libname = PyText_AS_UTF8(lib->l_libname); + struct funcbuilder_s funcbuilder; - if ((((uintptr_t)opcodes[type_index]) & 1) == 0) { - /* the function type was already built. No need to force - the arg and return value to be built again. 
*/ + /* return type: */ + fresult = realize_c_func_return_type(lib->l_types_builder, opcodes, + type_index); + if (fresult == NULL) + goto error; + + /* argument types: */ + /* note that if the arguments are already built, they have a + pointer in the 'opcodes' array, and GETOP() returns a + random even value. But OP_FUNCTION_END is odd, so the + condition below still works correctly. */ + i = type_index + 1; + while (_CFFI_GETOP(opcodes[i]) != _CFFI_OP_FUNCTION_END) + i++; + pfargs = alloca(sizeof(CTypeDescrObject *) * (i - type_index - 1)); + i = type_index + 1; + while (_CFFI_GETOP(opcodes[i]) != _CFFI_OP_FUNCTION_END) { + CTypeDescrObject *ct = realize_c_type(lib->l_types_builder, opcodes, i); + if (ct == NULL) + goto error; + pfargs[nargs++] = ct; + i++; } - else { - assert(_CFFI_GETOP(opcodes[type_index]) == _CFFI_OP_FUNCTION); - /* return type: */ - ct = realize_c_type(lib->l_types_builder, opcodes, - _CFFI_GETARG(opcodes[type_index])); - if (ct == NULL) - return NULL; - Py_DECREF(ct); - - /* argument types: */ - i = type_index + 1; - while (_CFFI_GETOP(opcodes[i]) != _CFFI_OP_FUNCTION_END) { - ct = realize_c_type(lib->l_types_builder, opcodes, i); - if (ct == NULL) - return NULL; - Py_DECREF(ct); - i++; - } - } + memset(&funcbuilder, 0, sizeof(funcbuilder)); + if (fb_build_name(&funcbuilder, g->name, pfargs, nargs, fresult, 0) < 0) + goto error; /* xxx the few bytes of memory we allocate here leak, but it's a minor concern because it should only occur for CPYTHON_BLTN. There is one per real C function in a CFFI C extension module. CPython never unloads its C extension modules anyway. 
*/ - xfunc = PyMem_Malloc(sizeof(struct CPyExtFunc_s)); + xfunc = PyMem_Malloc(sizeof(struct CPyExtFunc_s) + + funcbuilder.nb_bytes + + strlen(format) + strlen(libname)); if (xfunc == NULL) { PyErr_NoMemory(); - return NULL; + goto error; } memset((char *)xfunc, 0, sizeof(struct CPyExtFunc_s)); assert(g->address); xfunc->md.ml_meth = (PyCFunction)g->address; xfunc->md.ml_flags = flags; xfunc->md.ml_name = g->name; - xfunc->md.ml_doc = cpyextfunc_doc; + xfunc->md.ml_doc = xfunc->doc; xfunc->direct_fn = g->size_or_direct_fn; xfunc->type_index = type_index; - return PyCFunction_NewEx(&xfunc->md, (PyObject *)lib, lib->l_libname); + /* build the docstring */ + funcbuilder.bufferp = xfunc->doc; + if (fb_build_name(&funcbuilder, g->name, pfargs, nargs, fresult, 0) < 0) + goto error; + sprintf(funcbuilder.bufferp - 1, format, libname); + /* done building the docstring */ + + result = PyCFunction_NewEx(&xfunc->md, (PyObject *)lib, lib->l_libname); + /* fall-through */ + error: + Py_XDECREF(fresult); + while (nargs > 0) { + --nargs; + Py_DECREF(pfargs[nargs]); + } + return result; } static PyObject *lib_build_and_cache_attr(LibObject *lib, PyObject *name, diff --git a/c/realize_c_type.c b/c/realize_c_type.c --- a/c/realize_c_type.c +++ b/c/realize_c_type.c @@ -645,6 +645,33 @@ return x; }; +static CTypeDescrObject * +realize_c_func_return_type(builder_c_t *builder, + _cffi_opcode_t opcodes[], int index) +{ + PyObject *x; + CTypeDescrObject *ct; + _cffi_opcode_t op = opcodes[index]; + + if ((((uintptr_t)op) & 1) == 0) { + /* already built: assert that it is a function and fish + for the return type */ + x = (PyObject *)op; + assert(PyTuple_Check(x)); /* from _CFFI_OP_FUNCTION */ + x = PyTuple_GET_ITEM(x, 0); + assert(CTypeDescr_Check(x)); + assert(((CTypeDescrObject *)x)->ct_flags & CT_FUNCTIONPTR); + x = PyTuple_GET_ITEM(((CTypeDescrObject *)x)->ct_stuff, 1); + assert(CTypeDescr_Check(x)); + Py_INCREF(x); + return (CTypeDescrObject *)x; + } + else { + assert(_CFFI_GETOP(op) 
== _CFFI_OP_FUNCTION); + return realize_c_type(builder, opcodes, _CFFI_GETARG(opcodes[index])); + } +} + static int do_realize_lazy_struct(CTypeDescrObject *ct) { /* This is called by force_lazy_struct() in _cffi_backend.c */ diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -10,6 +10,13 @@ * ffi.unpack() +* extern "Python+C" + +* in API mode, ``help(lib.foo)`` returns a docstring containing the C + signature now. Note that ``help(lib)`` itself is still useless; I + haven't figured out the hacks needed to convince ``pydoc`` of + showing more. You can use ``dir(lib)`` but it is not most helpful. + v1.5.2 ====== diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -416,8 +416,11 @@ def test_math_sin_type(): ffi = FFI() - ffi.cdef("double sin(double);") - lib = verify(ffi, 'test_math_sin_type', '#include ') + ffi.cdef("double sin(double); void *xxtestfunc();") + lib = verify(ffi, 'test_math_sin_type', """ + #include + void *xxtestfunc(void) { return 0; } + """) # 'lib.sin' is typed as a object on lib assert ffi.typeof(lib.sin).cname == "double(*)(double)" # 'x' is another object on lib, made very indirectly @@ -427,7 +430,16 @@ # present on built-in functions on CPython; must be emulated on PyPy: assert lib.sin.__name__ == 'sin' assert lib.sin.__module__ == '_CFFI_test_math_sin_type' - assert lib.sin.__doc__ == 'direct call to the C function of the same name' + assert lib.sin.__doc__ == ( + "double sin(double);\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") + + assert ffi.typeof(lib.xxtestfunc).cname == "void *(*)()" + assert lib.xxtestfunc.__doc__ == ( + "void *xxtestfunc();\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") def test_verify_anonymous_struct_with_typedef(): ffi = FFI() From pypy.commits at gmail.com Tue Apr 19 04:37:40 2016 From: 
pypy.commits at gmail.com (arigo) Date: Tue, 19 Apr 2016 01:37:40 -0700 (PDT) Subject: [pypy-commit] pypy default: Update to cffi/eeef3869b994 Message-ID: <5715ee54.c711c30a.86149.1f5d@mx.google.com> Author: Armin Rigo Branch: Changeset: r83762:cea8ead4e869 Date: 2016-04-19 10:37 +0200 http://bitbucket.org/pypy/pypy/changeset/cea8ead4e869/ Log: Update to cffi/eeef3869b994 diff --git a/pypy/module/_cffi_backend/lib_obj.py b/pypy/module/_cffi_backend/lib_obj.py --- a/pypy/module/_cffi_backend/lib_obj.py +++ b/pypy/module/_cffi_backend/lib_obj.py @@ -64,7 +64,8 @@ # ptr = rffi.cast(rffi.CCHARP, g.c_address) assert ptr - return W_FunctionWrapper(self.space, ptr, g.c_size_or_direct_fn, + return W_FunctionWrapper(self.space, self.ffi, + ptr, g.c_size_or_direct_fn, rawfunctype, fnname, self.libname) @jit.elidable_promote() diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -238,7 +238,7 @@ self.nostruct_nargs = len(ctfuncptr.fargs) - (locs is not None and locs[0] == 'R') - def unexpected_fn_type(self, ffi): + def repr_fn_type(self, ffi, repl=""): fargs, fret, ellipsis, abi = self._unpack(ffi) argnames = [farg.name for farg in fargs] if ellipsis: @@ -246,9 +246,14 @@ sargs = ', '.join(argnames) sret1 = fret.name[:fret.name_position] sret2 = fret.name[fret.name_position:] + if len(repl) > 0 and not sret1.endswith('*'): + repl = " " + repl + return '%s%s(%s)%s' % (sret1, repl, sargs, sret2) + + def unexpected_fn_type(self, ffi): raise oefmt(ffi.w_FFIError, - "the type '%s(%s)%s' is a function type, not a " - "pointer-to-function type", sret1, sargs, sret2) + "the type '%s' is a function type, not a " + "pointer-to-function type", self.repr_fn_type(ffi)) def realize_c_type(ffi, opcodes, index): diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- 
a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -420,9 +420,11 @@ def test_math_sin_type(self): ffi, lib = self.prepare( - "double sin(double);", + "double sin(double); void *xxtestfunc();", 'test_math_sin_type', - '#include ') + """#include + void *xxtestfunc(void) { return 0; } + """) # 'lib.sin' is typed as a object on lib assert ffi.typeof(lib.sin).cname == "double(*)(double)" # 'x' is another object on lib, made very indirectly @@ -432,7 +434,16 @@ # present on built-in functions on CPython; must be emulated on PyPy: assert lib.sin.__name__ == 'sin' assert lib.sin.__module__ == '_CFFI_test_math_sin_type' - assert lib.sin.__doc__=='direct call to the C function of the same name' + assert lib.sin.__doc__ == ( + "double sin(double);\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") + + assert ffi.typeof(lib.xxtestfunc).cname == "void *(*)()" + assert lib.xxtestfunc.__doc__ == ( + "void *xxtestfunc();\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") def test_verify_anonymous_struct_with_typedef(self): ffi, lib = self.prepare( diff --git a/pypy/module/_cffi_backend/wrapper.py b/pypy/module/_cffi_backend/wrapper.py --- a/pypy/module/_cffi_backend/wrapper.py +++ b/pypy/module/_cffi_backend/wrapper.py @@ -1,6 +1,7 @@ from pypy.interpreter.error import oefmt from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, interp_attrproperty +from pypy.interpreter.typedef import GetSetProperty from pypy.interpreter.gateway import interp2app from rpython.rlib import jit @@ -24,9 +25,8 @@ This class cannot be used for variadic functions. 
""" _immutable_ = True - common_doc_str = 'direct call to the C function of the same name' - def __init__(self, space, fnptr, directfnptr, + def __init__(self, space, ffi, fnptr, directfnptr, rawfunctype, fnname, modulename): # everything related to the type of the function is accessed # as immutable attributes of the 'rawfunctype' object, which @@ -39,6 +39,7 @@ assert locs is None or len(ctype.fargs) == len(locs) # self.space = space + self.ffi = ffi self.fnptr = fnptr self.directfnptr = directfnptr self.rawfunctype = rawfunctype @@ -93,6 +94,11 @@ def descr_repr(self, space): return space.wrap("" % (self.fnname,)) + def descr_get_doc(self, space): + doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname) + doc = '%s;\n\nCFFI C function from %s.lib' % (doc, self.modulename) + return space.wrap(doc) + @jit.unroll_safe def prepare_args(space, rawfunctype, args_w, start_index): @@ -128,6 +134,6 @@ __call__ = interp2app(W_FunctionWrapper.descr_call), __name__ = interp_attrproperty('fnname', cls=W_FunctionWrapper), __module__ = interp_attrproperty('modulename', cls=W_FunctionWrapper), - __doc__ = interp_attrproperty('common_doc_str', cls=W_FunctionWrapper), + __doc__ = GetSetProperty(W_FunctionWrapper.descr_get_doc), ) W_FunctionWrapper.typedef.acceptable_as_base_class = False From pypy.commits at gmail.com Tue Apr 19 05:07:19 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Apr 2016 02:07:19 -0700 (PDT) Subject: [pypy-commit] pypy default: import this too Message-ID: <5715f547.01e61c0a.80402.fffff941@mx.google.com> Author: Armin Rigo Branch: Changeset: r83763:7e6501f41aa0 Date: 2016-04-19 11:07 +0200 http://bitbucket.org/pypy/pypy/changeset/7e6501f41aa0/ Log: import this too diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -417,8 
+417,11 @@ def test_math_sin_type(): ffi = FFI() - ffi.cdef("double sin(double);") - lib = verify(ffi, 'test_math_sin_type', '#include ') + ffi.cdef("double sin(double); void *xxtestfunc();") + lib = verify(ffi, 'test_math_sin_type', """ + #include + void *xxtestfunc(void) { return 0; } + """) # 'lib.sin' is typed as a object on lib assert ffi.typeof(lib.sin).cname == "double(*)(double)" # 'x' is another object on lib, made very indirectly @@ -428,7 +431,16 @@ # present on built-in functions on CPython; must be emulated on PyPy: assert lib.sin.__name__ == 'sin' assert lib.sin.__module__ == '_CFFI_test_math_sin_type' - assert lib.sin.__doc__ == 'direct call to the C function of the same name' + assert lib.sin.__doc__ == ( + "double sin(double);\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") + + assert ffi.typeof(lib.xxtestfunc).cname == "void *(*)()" + assert lib.xxtestfunc.__doc__ == ( + "void *xxtestfunc();\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") def test_verify_anonymous_struct_with_typedef(): ffi = FFI() From pypy.commits at gmail.com Tue Apr 19 05:46:22 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Apr 2016 02:46:22 -0700 (PDT) Subject: [pypy-commit] pypy default: c_forkpty() must have the same "raw" declaration as c_fork(). Message-ID: <5715fe6e.519d1c0a.6dfa.0629@mx.google.com> Author: Armin Rigo Branch: Changeset: r83764:ed4a42015c48 Date: 2016-04-19 11:46 +0200 http://bitbucket.org/pypy/pypy/changeset/ed4a42015c48/ Log: c_forkpty() must have the same "raw" declaration as c_fork(). Also fix the reporting of errno after these two functions. 
diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -189,6 +189,7 @@ rthread.tlfield_alt_errno.setraw(_get_errno()) else: rthread.tlfield_rpy_errno.setraw(_get_errno()) + # ^^^ keep fork() up-to-date too, below if os.name == 'nt': @@ -765,17 +766,19 @@ save_err=rffi.RFFI_SAVE_ERRNO) c_forkpty = external('forkpty', [rffi.INTP, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], - rffi.PID_T, - save_err=rffi.RFFI_SAVE_ERRNO) + rffi.PID_T, _nowrapper = True) @replace_os_function('fork') @jit.dont_look_inside def fork(): # NB. keep forkpty() up-to-date, too + # lots of custom logic here, to do things in the right order ofs = debug.debug_offset() opaqueaddr = rthread.gc_thread_before_fork() childpid = c_fork() + errno = _get_errno() rthread.gc_thread_after_fork(childpid, opaqueaddr) + rthread.tlfield_rpy_errno.setraw(errno) childpid = handle_posix_error('fork', childpid) if childpid == 0: debug.debug_forked(ofs) @@ -799,11 +802,14 @@ def forkpty(): master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') master_p[0] = rffi.cast(rffi.INT, -1) + null = lltype.nullptr(rffi.VOIDP.TO) try: ofs = debug.debug_offset() opaqueaddr = rthread.gc_thread_before_fork() - childpid = c_forkpty(master_p, None, None, None) + childpid = c_forkpty(master_p, null, null, null) + errno = _get_errno() rthread.gc_thread_after_fork(childpid, opaqueaddr) + rthread.tlfield_rpy_errno.setraw(errno) childpid = handle_posix_error('forkpty', childpid) if childpid == 0: debug.debug_forked(ofs) From pypy.commits at gmail.com Tue Apr 19 06:10:08 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 19 Apr 2016 03:10:08 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: modified ztranslation test to check the get_location and ensure it can be translated Message-ID: <57160400.8673c20a.c9221.57af@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83765:983043f03fcd Date: 2016-04-19 12:09 +0200 
http://bitbucket.org/pypy/pypy/changeset/983043f03fcd/ Log: modified ztranslation test to check the get_location and ensure it can be translated diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -48,9 +48,13 @@ lltype.Float, macro=True, releasegil=True, compilation_info=eci) + def get_location(): + return "file", 0, "func", 0, "opcode" + jitdriver = JitDriver(greens = [], reds = ['total', 'frame', 'j'], - virtualizables = ['frame']) + virtualizables = ['frame'], + get_location = get_location) def f(i, j): for param, _ in unroll_parameters: defl = PARAMETERS[param] diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -689,8 +689,14 @@ def get_location(greenkey): greenargs = unwrap_greenkey(greenkey) fn = support.maybe_on_top_of_llinterp(rtyper, get_location_ptr) - llres = fn(*greenargs) - return llres + tuple_ptr = fn(*greenargs) + # it seems there is no "hltuple" function + return (hlstr(tuple_ptr.item0), + intmask(tuple_ptr.item1), + hlstr(tuple_ptr.item2), + intmask(tuple_ptr.item3), + hlstr(tuple_ptr.item4) + ) self.get_location = get_location # printable_loc_ptr = self.jitdriver_sd._printable_loc_ptr diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -7,7 +7,6 @@ from rpython.rtyper.lltypesystem import rffi, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib.rweaklist import RWeakListMixin -from rpython.jit.metainterp import jitlog MAX_FUNC_NAME = 1023 @@ -125,6 +124,7 @@ def enable_jitlog(self, fileno): # initialize the jit log + from rpython.jit.metainterp import jitlog p_error = 
self.cintf.jitlog_init(fileno) if p_error: raise VMProfError(rffi.charp2str(p_error)) From pypy.commits at gmail.com Tue Apr 19 06:28:16 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Apr 2016 03:28:16 -0700 (PDT) Subject: [pypy-commit] pypy default: Mention the issue in the crash message Message-ID: <57160840.cfa81c0a.320e3.1ee8@mx.google.com> Author: Armin Rigo Branch: Changeset: r83766:fc35b7e19735 Date: 2016-04-19 12:28 +0200 http://bitbucket.org/pypy/pypy/changeset/fc35b7e19735/ Log: Mention the issue in the crash message diff --git a/rpython/translator/c/src/thread_gil.c b/rpython/translator/c/src/thread_gil.c --- a/rpython/translator/c/src/thread_gil.c +++ b/rpython/translator/c/src/thread_gil.c @@ -89,7 +89,9 @@ * at precisely this moment, killing the first thread. */ fprintf(stderr, "Fatal RPython error: a thread is trying to wait " - "for the GIL, but the GIL was not initialized\n"); + "for the GIL, but the GIL was not initialized\n" + "(For PyPy, see " + "https://bitbucket.org/pypy/pypy/issues/2274)\n"); abort(); } From pypy.commits at gmail.com Tue Apr 19 06:42:20 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Apr 2016 03:42:20 -0700 (PDT) Subject: [pypy-commit] pypy default: Uh, the test was not doing anything Message-ID: <57160b8c.6614c20a.ebfb5.5dbd@mx.google.com> Author: Armin Rigo Branch: Changeset: r83767:a6cd003d2b48 Date: 2016-04-19 12:40 +0200 http://bitbucket.org/pypy/pypy/changeset/a6cd003d2b48/ Log: Uh, the test was not doing anything diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -109,6 +109,8 @@ return PyLong_FromLong(3); """), ]) + res = module.bounce() + assert res == 3 From pypy.commits at gmail.com Tue Apr 19 06:42:22 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Apr 2016 03:42:22 -0700 (PDT) Subject: [pypy-commit] pypy default: PyEval_ThreadsInitialized(): 
don't return always 1 Message-ID: <57160b8e.58811c0a.26051.ffffa693@mx.google.com> Author: Armin Rigo Branch: Changeset: r83768:e623b2b8996e Date: 2016-04-19 12:42 +0200 http://bitbucket.org/pypy/pypy/changeset/e623b2b8996e/ Log: PyEval_ThreadsInitialized(): don't return always 1 diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -52,7 +52,8 @@ def PyEval_ThreadsInitialized(space): if not space.config.translation.thread: return 0 - return 1 + from pypy.module.thread import os_thread + return int(os_thread.threads_initialized(space)) # XXX: might be generally useful def encapsulator(T, flavor='raw', dealloc=None): diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -112,6 +112,16 @@ res = module.bounce() assert res == 3 + def test_threadsinitialized(self): + module = self.import_extension('foo', [ + ("test", "METH_NOARGS", + """ + return PyInt_FromLong(PyEval_ThreadsInitialized()); + """), + ]) + res = module.test() + print "got", res + assert res in (0, 1) class TestInterpreterState(BaseApiTest): diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py --- a/pypy/module/thread/gil.py +++ b/pypy/module/thread/gil.py @@ -34,6 +34,9 @@ result = False # already set up return result + def threads_initialized(self): + return self.gil_ready + ## def reinit_threads(self, space): ## "Called in the child process after a fork()" ## OSThreadLocals.reinit_threads(self, space) diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -148,6 +148,9 @@ space.threadlocals.setup_threads(space) bootstrapper.setup(space) +def threads_initialized(space): + return space.threadlocals.threads_initialized() + def reinit_threads(space): "Called in 
the child process after a fork()" From pypy.commits at gmail.com Tue Apr 19 07:58:13 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 19 Apr 2016 04:58:13 -0700 (PDT) Subject: [pypy-commit] pypy default: update for 5.1 release Message-ID: <57161d55.d3161c0a.f9c08.4657@mx.google.com> Author: mattip Branch: Changeset: r83769:aa60332382a1 Date: 2016-04-19 14:54 +0300 http://bitbucket.org/pypy/pypy/changeset/aa60332382a1/ Log: update for 5.1 release diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -21,16 +21,14 @@ 'own-linux-x86-64', 'own-linux-armhf', 'own-win-x86-32', + 'own-linux-s390x-2', # 'own-macosx-x86-32', -# 'pypy-c-app-level-linux-x86-32', -# 'pypy-c-app-level-linux-x86-64', -# 'pypy-c-stackless-app-level-linux-x86-32', -# 'pypy-c-app-level-win-x86-32', 'pypy-c-jit-linux-x86-32', 'pypy-c-jit-linux-x86-64', - 'pypy-c-jit-freebsd-9-x86-64', +# 'pypy-c-jit-freebsd-9-x86-64', 'pypy-c-jit-macosx-x86-64', 'pypy-c-jit-win-x86-32', + 'pypy-c-jit-linux-s390x-2', 'build-pypy-c-jit-linux-armhf-raring', 'build-pypy-c-jit-linux-armhf-raspbian', 'build-pypy-c-jit-linux-armel', diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,7 +1,7 @@ # Edit these appropriately before running this script maj=5 -min=0 -rev=1 +min=1 +rev=0 branchname=release-$maj.x # ==OR== release-$maj.$min.x tagname=release-$maj.$min.$rev # This script will download latest builds from the buildmaster, rename the top From pypy.commits at gmail.com Tue Apr 19 07:58:15 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 19 Apr 2016 04:58:15 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: merge default into branch Message-ID: <57161d57.82b71c0a.3578d.4724@mx.google.com> Author: mattip Branch: release-5.x Changeset: r83770:94369b856427 Date: 2016-04-19 14:57 +0300 
http://bitbucket.org/pypy/pypy/changeset/94369b856427/ Log: merge default into branch diff too long, truncating to 2000 out of 5943 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -111,23 +111,24 @@ Simon Burton Martin Matusiak Konstantin Lopuhin + Stefano Rivera Wenzhu Man John Witulski Laurence Tratt Ivan Sichmann Freitas Greg Price Dario Bertini - Stefano Rivera Mark Pearse Simon Cross + Edd Barrett Andreas Stührk - Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Spenser Bauman Jeremy Thurgood Paweł Piotr Przeradowski - Spenser Bauman + Tobias Pape Paul deGrandis Ilya Osadchiy marky1991 @@ -139,7 +140,7 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Tobias Pape + Mark Young Wanja Saatkamp Gerald Klix Mike Blume @@ -170,9 +171,9 @@ Yichao Yu Rocco Moretti Gintautas Miliauskas + Devin Jeanpierre Michael Twomey Lucian Branescu Mihaila - Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -183,6 +184,7 @@ Victor Stinner Andrews Medina anatoly techtonik + Sergey Matyunin Stuart Williams Jasper Schulz Christian Hudon @@ -217,7 +219,6 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan - Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -225,7 +226,9 @@ Pieter Zieschang Gabriel Lukas Vacek + Kunal Grover Andrew Dalke + Florin Papa Sylvain Thenault Jakub Stasiak Nathan Taylor @@ -240,7 +243,6 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner - Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -252,9 +254,11 @@ Artur Lisiecki Sergey Kishchenko Ignas Mikalajunas + Alecsandru Patrascu Christoph Gerum Martin Blais Lene Wagner + Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su Toni Mattis @@ -291,6 +295,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann + florinpapa Rafał Gałczyński Matt Bogosian Christian Muirhead @@ -305,6 +310,7 @@ Boglarka Vezer Chris Pressey Buck Golemon + Diana Popa Konrad Delong Dinu Gherman Chris Lambacher diff --git a/lib_pypy/_collections.py 
b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -320,8 +320,7 @@ def __reduce_ex__(self, proto): return type(self), (list(self), self.maxlen) - def __hash__(self): - raise TypeError("deque objects are unhashable") + __hash__ = None def __copy__(self): return self.__class__(self, self.maxlen) diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -1,51 +1,22 @@ -from resource import _struct_rusage, struct_rusage -from ctypes import CDLL, c_int, POINTER, byref -from ctypes.util import find_library +from resource import ffi, lib, _make_struct_rusage __all__ = ["wait3", "wait4"] -libc = CDLL(find_library("c")) -c_wait3 = libc.wait3 -c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] -c_wait3.restype = c_int - -c_wait4 = libc.wait4 -c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)] -c_wait4.restype = c_int - -def create_struct_rusage(c_struct): - return struct_rusage(( - float(c_struct.ru_utime), - float(c_struct.ru_stime), - c_struct.ru_maxrss, - c_struct.ru_ixrss, - c_struct.ru_idrss, - c_struct.ru_isrss, - c_struct.ru_minflt, - c_struct.ru_majflt, - c_struct.ru_nswap, - c_struct.ru_inblock, - c_struct.ru_oublock, - c_struct.ru_msgsnd, - c_struct.ru_msgrcv, - c_struct.ru_nsignals, - c_struct.ru_nvcsw, - c_struct.ru_nivcsw)) def wait3(options): - status = c_int() - _rusage = _struct_rusage() - pid = c_wait3(byref(status), c_int(options), byref(_rusage)) + status = ffi.new("int *") + ru = ffi.new("struct rusage *") + pid = lib.wait3(status, options, ru) - rusage = create_struct_rusage(_rusage) + rusage = _make_struct_rusage(ru) - return pid, status.value, rusage + return pid, status[0], rusage def wait4(pid, options): - status = c_int() - _rusage = _struct_rusage() - pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage)) + status = ffi.new("int *") + ru = ffi.new("struct rusage *") + pid = 
lib.wait4(pid, status, options, ru) - rusage = create_struct_rusage(_rusage) + rusage = _make_struct_rusage(ru) - return pid, status.value, rusage + return pid, status[0], rusage diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_resource_build.py @@ -0,0 +1,118 @@ +from cffi import FFI + +ffi = FFI() + +# Note: we don't directly expose 'struct timeval' or 'struct rlimit' + + +rlimit_consts = ''' +RLIMIT_CPU +RLIMIT_FSIZE +RLIMIT_DATA +RLIMIT_STACK +RLIMIT_CORE +RLIMIT_NOFILE +RLIMIT_OFILE +RLIMIT_VMEM +RLIMIT_AS +RLIMIT_RSS +RLIMIT_NPROC +RLIMIT_MEMLOCK +RLIMIT_SBSIZE +RLIM_INFINITY +RUSAGE_SELF +RUSAGE_CHILDREN +RUSAGE_BOTH +'''.split() + +rlimit_consts = ['#ifdef %s\n\t{"%s", %s},\n#endif\n' % (s, s, s) + for s in rlimit_consts] + + +ffi.set_source("_resource_cffi", """ +#include +#include +#include +#include + +static const struct my_rlimit_def { + const char *name; + long long value; +} my_rlimit_consts[] = { +$RLIMIT_CONSTS + { NULL, 0 } +}; + +#define doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001) + +static double my_utime(struct rusage *input) +{ + return doubletime(input->ru_utime); +} + +static double my_stime(struct rusage *input) +{ + return doubletime(input->ru_stime); +} + +static int my_getrlimit(int resource, long long result[2]) +{ + struct rlimit rl; + if (getrlimit(resource, &rl) == -1) + return -1; + result[0] = rl.rlim_cur; + result[1] = rl.rlim_max; + return 0; +} + +static int my_setrlimit(int resource, long long cur, long long max) +{ + struct rlimit rl; + rl.rlim_cur = cur & RLIM_INFINITY; + rl.rlim_max = max & RLIM_INFINITY; + return setrlimit(resource, &rl); +} + +""".replace('$RLIMIT_CONSTS', ''.join(rlimit_consts))) + + +ffi.cdef(""" + +#define RLIM_NLIMITS ... 
+ +const struct my_rlimit_def { + const char *name; + long long value; +} my_rlimit_consts[]; + +struct rusage { + long ru_maxrss; + long ru_ixrss; + long ru_idrss; + long ru_isrss; + long ru_minflt; + long ru_majflt; + long ru_nswap; + long ru_inblock; + long ru_oublock; + long ru_msgsnd; + long ru_msgrcv; + long ru_nsignals; + long ru_nvcsw; + long ru_nivcsw; + ...; +}; + +static double my_utime(struct rusage *); +static double my_stime(struct rusage *); +void getrusage(int who, struct rusage *result); +int my_getrlimit(int resource, long long result[2]); +int my_setrlimit(int resource, long long cur, long long max); + +int wait3(int *status, int options, struct rusage *rusage); +int wait4(int pid, int *status, int options, struct rusage *rusage); +""") + + +if __name__ == "__main__": + ffi.compile() diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.5.2 +Version: 1.6.0 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.5.2" -__version_info__ = (1, 5, 2) +__version__ = "1.6.0" +__version_info__ = (1, 6, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.5.2" + "\ncompiled with cffi version: 1.6.0" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -299,6 +299,23 @@ """ return self._backend.string(cdata, maxlen) + def unpack(self, cdata, length): + """Unpack an array of C data of the given length, + returning a Python string/unicode/list. + + If 'cdata' is a pointer to 'char', returns a byte string. + It does not stop at the first null. This is equivalent to: + ffi.buffer(cdata, length)[:] + + If 'cdata' is a pointer to 'wchar_t', returns a unicode string. + 'length' is measured in wchar_t's; it is not the size in bytes. + + If 'cdata' is a pointer to anything else, returns a list of + 'length' items. This is a faster equivalent to: + [cdata[i] for i in range(length)] + """ + return self._backend.unpack(cdata, length) + def buffer(self, cdata, size=-1): """Return a read-write buffer object that references the raw C data pointed to by the given 'cdata'. The 'cdata' must be a pointer or @@ -721,6 +738,26 @@ raise ValueError("ffi.def_extern() is only available on API-mode FFI " "objects") + def list_types(self): + """Returns the user type names known to this FFI instance. 
+ This returns a tuple containing three lists of names: + (typedef_names, names_of_structs, names_of_unions) + """ + typedefs = [] + structs = [] + unions = [] + for key in self._parser._declarations: + if key.startswith('typedef '): + typedefs.append(key[8:]) + elif key.startswith('struct '): + structs.append(key[7:]) + elif key.startswith('union '): + unions.append(key[6:]) + typedefs.sort() + structs.sort() + unions.sort() + return (typedefs, structs, unions) + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -29,7 +29,8 @@ _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") _r_cdecl = re.compile(r"\b__cdecl\b") -_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.') +_r_extern_python = re.compile(r'\bextern\s*"' + r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.') _r_star_const_space = re.compile( # matches "* const " r"[*]\s*((const|volatile|restrict)\b\s*)+") @@ -88,6 +89,12 @@ # void __cffi_extern_python_start; # int foo(int); # void __cffi_extern_python_stop; + # + # input: `extern "Python+C" int foo(int);` + # output: + # void __cffi_extern_python_plus_c_start; + # int foo(int); + # void __cffi_extern_python_stop; parts = [] while True: match = _r_extern_python.search(csource) @@ -98,7 +105,10 @@ #print ''.join(parts)+csource #print '=>' parts.append(csource[:match.start()]) - parts.append('void __cffi_extern_python_start; ') + if 'C' in match.group(1): + parts.append('void __cffi_extern_python_plus_c_start; ') + else: + parts.append('void __cffi_extern_python_start; ') if csource[endpos] == '{': # grouping variant closing = csource.find('}', endpos) @@ -302,7 +312,7 @@ break # try: - self._inside_extern_python = False + self._inside_extern_python = '__cffi_extern_python_stop' for decl in iterator: if isinstance(decl, pycparser.c_ast.Decl): 
self._parse_decl(decl) @@ -376,8 +386,10 @@ tp = self._get_type_pointer(tp, quals) if self._options.get('dllexport'): tag = 'dllexport_python ' - elif self._inside_extern_python: + elif self._inside_extern_python == '__cffi_extern_python_start': tag = 'extern_python ' + elif self._inside_extern_python == '__cffi_extern_python_plus_c_start': + tag = 'extern_python_plus_c ' else: tag = 'function ' self._declare(tag + decl.name, tp) @@ -421,11 +433,9 @@ # hack: `extern "Python"` in the C source is replaced # with "void __cffi_extern_python_start;" and # "void __cffi_extern_python_stop;" - self._inside_extern_python = not self._inside_extern_python - assert self._inside_extern_python == ( - decl.name == '__cffi_extern_python_start') + self._inside_extern_python = decl.name else: - if self._inside_extern_python: + if self._inside_extern_python !='__cffi_extern_python_stop': raise api.CDefError( "cannot declare constants or " "variables with 'extern \"Python\"'") diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -1145,11 +1145,11 @@ def _generate_cpy_extern_python_collecttype(self, tp, name): assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) + _generate_cpy_dllexport_python_collecttype = \ + _generate_cpy_extern_python_plus_c_collecttype = \ + _generate_cpy_extern_python_collecttype - def _generate_cpy_dllexport_python_collecttype(self, tp, name): - self._generate_cpy_extern_python_collecttype(tp, name) - - def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): + def _extern_python_decl(self, tp, name, tag_and_space): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' @@ -1184,11 +1184,7 @@ size_of_a = 'sizeof(%s) > %d ? 
sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) - if dllexport: - tag = 'CFFI_DLLEXPORT' - else: - tag = 'static' - prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) + prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') @@ -1206,8 +1202,14 @@ prnt() self._num_externpy += 1 + def _generate_cpy_extern_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'static ') + def _generate_cpy_dllexport_python_decl(self, tp, name): - self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ') + + def _generate_cpy_extern_python_plus_c_decl(self, tp, name): + self._extern_python_decl(tp, name, '') def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: @@ -1220,8 +1222,9 @@ self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) - def _generate_cpy_dllexport_python_ctx(self, tp, name): - self._generate_cpy_extern_python_ctx(tp, name) + _generate_cpy_dllexport_python_ctx = \ + _generate_cpy_extern_python_plus_c_ctx = \ + _generate_cpy_extern_python_ctx def _string_literal(self, s): def _char_repr(c): @@ -1231,7 +1234,7 @@ if c == '\n': return '\\n' return '\\%03o' % ord(c) lines = [] - for line in s.splitlines(True): + for line in s.splitlines(True) or ['']: lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) return ' \\\n'.join(lines) @@ -1319,7 +1322,9 @@ s = s.encode('ascii') super(NativeIO, self).write(s) -def _make_c_or_py_source(ffi, module_name, preamble, target_file): +def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): + if verbose: + print("generating %s" % (target_file,)) recompiler = Recompiler(ffi, module_name, target_is_python=(preamble is None)) recompiler.collect_type_table() @@ -1331,6 +1336,8 @@ with open(target_file, 'r') as f1: if f1.read(len(output) + 
1) != output: raise IOError + if verbose: + print("(already up-to-date)") return False # already up-to-date except IOError: tmp_file = '%s.~%d' % (target_file, os.getpid()) @@ -1343,12 +1350,14 @@ os.rename(tmp_file, target_file) return True -def make_c_source(ffi, module_name, preamble, target_c_file): +def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): assert preamble is not None - return _make_c_or_py_source(ffi, module_name, preamble, target_c_file) + return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, + verbose) -def make_py_source(ffi, module_name, target_py_file): - return _make_c_or_py_source(ffi, module_name, None, target_py_file) +def make_py_source(ffi, module_name, target_py_file, verbose=False): + return _make_c_or_py_source(ffi, module_name, None, target_py_file, + verbose) def _modname_to_file(outputdir, modname, extension): parts = modname.split('.') @@ -1438,7 +1447,8 @@ target = '*' # ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) - updated = make_c_source(ffi, module_name, preamble, c_file) + updated = make_c_source(ffi, module_name, preamble, c_file, + verbose=compiler_verbose) if call_c_compiler: patchlist = [] cwd = os.getcwd() @@ -1458,7 +1468,8 @@ else: if c_file is None: c_file, _ = _modname_to_file(tmpdir, module_name, '.py') - updated = make_py_source(ffi, module_name, c_file) + updated = make_py_source(ffi, module_name, c_file, + verbose=compiler_verbose) if call_c_compiler: return c_file else: @@ -1484,4 +1495,7 @@ def typeof_disabled(*args, **kwds): raise NotImplementedError ffi._typeof = typeof_disabled + for name in dir(ffi): + if not name.startswith('_') and not hasattr(module.ffi, name): + setattr(ffi, name, NotImplemented) return module.lib diff --git a/lib_pypy/ctypes_config_cache/.empty b/lib_pypy/ctypes_config_cache/.empty new file mode 100644 --- /dev/null +++ b/lib_pypy/ctypes_config_cache/.empty @@ -0,0 +1,1 @@ +dummy file to allow old buildbot configuration to 
run diff --git a/lib_pypy/ctypes_config_cache/__init__.py b/lib_pypy/ctypes_config_cache/__init__.py deleted file mode 100644 diff --git a/lib_pypy/ctypes_config_cache/dumpcache.py b/lib_pypy/ctypes_config_cache/dumpcache.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/dumpcache.py +++ /dev/null @@ -1,21 +0,0 @@ -import sys, os -from ctypes_configure import dumpcache - -def dumpcache2(basename, config): - size = 32 if sys.maxint <= 2**32 else 64 - filename = '_%s_%s_.py' % (basename, size) - dumpcache.dumpcache(__file__, filename, config) - # - filename = os.path.join(os.path.dirname(__file__), - '_%s_cache.py' % (basename,)) - g = open(filename, 'w') - print >> g, '''\ -import sys -_size = 32 if sys.maxint <= 2**32 else 64 -# XXX relative import, should be removed together with -# XXX the relative imports done e.g. by lib_pypy/pypy_test/test_hashlib -_mod = __import__("_%s_%%s_" %% (_size,), - globals(), locals(), ["*"]) -globals().update(_mod.__dict__)\ -''' % (basename,) - g.close() diff --git a/lib_pypy/ctypes_config_cache/locale.ctc.py b/lib_pypy/ctypes_config_cache/locale.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/locale.ctc.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -'ctypes_configure' source for _locale.py. -Run this to rebuild _locale_cache.py. 
-""" - -from ctypes_configure.configure import (configure, ExternalCompilationInfo, - ConstantInteger, DefinedConstantInteger, SimpleType, check_eci) -import dumpcache - -# ____________________________________________________________ - -_CONSTANTS = [ - 'LC_CTYPE', - 'LC_TIME', - 'LC_COLLATE', - 'LC_MONETARY', - 'LC_MESSAGES', - 'LC_NUMERIC', - 'LC_ALL', - 'CHAR_MAX', -] - -class LocaleConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['limits.h', - 'locale.h']) -for key in _CONSTANTS: - setattr(LocaleConfigure, key, DefinedConstantInteger(key)) - -config = configure(LocaleConfigure, noerr=True) -for key, value in config.items(): - if value is None: - del config[key] - _CONSTANTS.remove(key) - -# ____________________________________________________________ - -eci = ExternalCompilationInfo(includes=['locale.h', 'langinfo.h']) -HAS_LANGINFO = check_eci(eci) - -if HAS_LANGINFO: - # list of all possible names - langinfo_names = [ - "RADIXCHAR", "THOUSEP", "CRNCYSTR", - "D_T_FMT", "D_FMT", "T_FMT", "AM_STR", "PM_STR", - "CODESET", "T_FMT_AMPM", "ERA", "ERA_D_FMT", "ERA_D_T_FMT", - "ERA_T_FMT", "ALT_DIGITS", "YESEXPR", "NOEXPR", "_DATE_FMT", - ] - for i in range(1, 8): - langinfo_names.append("DAY_%d" % i) - langinfo_names.append("ABDAY_%d" % i) - for i in range(1, 13): - langinfo_names.append("MON_%d" % i) - langinfo_names.append("ABMON_%d" % i) - - class LanginfoConfigure: - _compilation_info_ = eci - nl_item = SimpleType('nl_item') - for key in langinfo_names: - setattr(LanginfoConfigure, key, DefinedConstantInteger(key)) - - langinfo_config = configure(LanginfoConfigure) - for key, value in langinfo_config.items(): - if value is None: - del langinfo_config[key] - langinfo_names.remove(key) - config.update(langinfo_config) - _CONSTANTS += langinfo_names - -# ____________________________________________________________ - -config['ALL_CONSTANTS'] = tuple(_CONSTANTS) -config['HAS_LANGINFO'] = HAS_LANGINFO -dumpcache.dumpcache2('locale', config) diff 
--git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py deleted file mode 100755 --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ /dev/null @@ -1,56 +0,0 @@ -#! /usr/bin/env python -# Run this script to rebuild all caches from the *.ctc.py files. - -import os, sys - -sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))) - -import py - -_dirpath = os.path.dirname(__file__) or os.curdir - -from rpython.tool.ansi_print import AnsiLogger -log = AnsiLogger("ctypes_config_cache") - - -def rebuild_one(name): - filename = os.path.join(_dirpath, name) - d = {'__file__': filename} - path = sys.path[:] - try: - sys.path.insert(0, _dirpath) - execfile(filename, d) - finally: - sys.path[:] = path - -def try_rebuild(): - size = 32 if sys.maxint <= 2**32 else 64 - # remove the files '_*_size_.py' - left = {} - for p in os.listdir(_dirpath): - if p.startswith('_') and (p.endswith('_%s_.py' % size) or - p.endswith('_%s_.pyc' % size)): - os.unlink(os.path.join(_dirpath, p)) - elif p.startswith('_') and (p.endswith('_.py') or - p.endswith('_.pyc')): - for i in range(2, len(p)-4): - left[p[:i]] = True - # remove the files '_*_cache.py' if there is no '_*_*_.py' left around - for p in os.listdir(_dirpath): - if p.startswith('_') and (p.endswith('_cache.py') or - p.endswith('_cache.pyc')): - if p[:-9] not in left: - os.unlink(os.path.join(_dirpath, p)) - # - for p in os.listdir(_dirpath): - if p.endswith('.ctc.py'): - try: - rebuild_one(p) - except Exception, e: - log.ERROR("Running %s:\n %s: %s" % ( - os.path.join(_dirpath, p), - e.__class__.__name__, e)) - - -if __name__ == '__main__': - try_rebuild() diff --git a/lib_pypy/ctypes_config_cache/resource.ctc.py b/lib_pypy/ctypes_config_cache/resource.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/resource.ctc.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -'ctypes_configure' source for resource.py. -Run this to rebuild _resource_cache.py. 
-""" - - -from ctypes import sizeof -import dumpcache -from ctypes_configure.configure import (configure, - ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger, - SimpleType) - - -_CONSTANTS = ( - 'RLIM_INFINITY', - 'RLIM_NLIMITS', -) -_OPTIONAL_CONSTANTS = ( - 'RLIMIT_CPU', - 'RLIMIT_FSIZE', - 'RLIMIT_DATA', - 'RLIMIT_STACK', - 'RLIMIT_CORE', - 'RLIMIT_RSS', - 'RLIMIT_NPROC', - 'RLIMIT_NOFILE', - 'RLIMIT_OFILE', - 'RLIMIT_MEMLOCK', - 'RLIMIT_AS', - 'RLIMIT_LOCKS', - 'RLIMIT_SIGPENDING', - 'RLIMIT_MSGQUEUE', - 'RLIMIT_NICE', - 'RLIMIT_RTPRIO', - 'RLIMIT_VMEM', - - 'RUSAGE_BOTH', - 'RUSAGE_SELF', - 'RUSAGE_CHILDREN', -) - -# Setup our configure -class ResourceConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h']) - rlim_t = SimpleType('rlim_t') -for key in _CONSTANTS: - setattr(ResourceConfigure, key, ConstantInteger(key)) -for key in _OPTIONAL_CONSTANTS: - setattr(ResourceConfigure, key, DefinedConstantInteger(key)) - -# Configure constants and types -config = configure(ResourceConfigure) -config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1 -optional_constants = [] -for key in _OPTIONAL_CONSTANTS: - if config[key] is not None: - optional_constants.append(key) - else: - del config[key] - -config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants) -dumpcache.dumpcache2('resource', config) diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py --- a/lib_pypy/pwd.py +++ b/lib_pypy/pwd.py @@ -1,4 +1,4 @@ -# ctypes implementation: Victor Stinner, 2008-05-08 +# indirectly based on ctypes implementation: Victor Stinner, 2008-05-08 """ This module provides access to the Unix password database. It is available on all Unix versions. 
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py --- a/lib_pypy/resource.py +++ b/lib_pypy/resource.py @@ -1,15 +1,8 @@ -import sys -if sys.platform == 'win32': - raise ImportError('resource module not available for win32') +"""http://docs.python.org/library/resource""" -# load the platform-specific cache made by running resource.ctc.py -from ctypes_config_cache._resource_cache import * - -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno -from ctypes import Structure, c_int, c_long, byref, POINTER +from _resource_cffi import ffi, lib from errno import EINVAL, EPERM -import _structseq +import _structseq, os try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f @@ -18,106 +11,37 @@ class error(Exception): pass +class struct_rusage: + """struct_rusage: Result from getrusage. -# Read required libc functions -_getrusage = libc.getrusage -_getrlimit = libc.getrlimit -_setrlimit = libc.setrlimit -try: - _getpagesize = libc.getpagesize - _getpagesize.argtypes = () - _getpagesize.restype = c_int -except AttributeError: - from os import sysconf - _getpagesize = None +This object may be accessed either as a tuple of + (utime,stime,maxrss,ixrss,idrss,isrss,minflt,majflt, + nswap,inblock,oublock,msgsnd,msgrcv,nsignals,nvcsw,nivcsw) +or via the attributes ru_utime, ru_stime, ru_maxrss, and so on.""" - -class timeval(Structure): - _fields_ = ( - ("tv_sec", c_long), - ("tv_usec", c_long), - ) - def __str__(self): - return "(%s, %s)" % (self.tv_sec, self.tv_usec) - - def __float__(self): - return self.tv_sec + self.tv_usec/1000000.0 - -class _struct_rusage(Structure): - _fields_ = ( - ("ru_utime", timeval), - ("ru_stime", timeval), - ("ru_maxrss", c_long), - ("ru_ixrss", c_long), - ("ru_idrss", c_long), - ("ru_isrss", c_long), - ("ru_minflt", c_long), - ("ru_majflt", c_long), - ("ru_nswap", c_long), - ("ru_inblock", c_long), - ("ru_oublock", c_long), - ("ru_msgsnd", c_long), - ("ru_msgrcv", c_long), - 
("ru_nsignals", c_long), - ("ru_nvcsw", c_long), - ("ru_nivcsw", c_long), - ) - -_getrusage.argtypes = (c_int, POINTER(_struct_rusage)) -_getrusage.restype = c_int - - -class struct_rusage: __metaclass__ = _structseq.structseqtype - ru_utime = _structseq.structseqfield(0) - ru_stime = _structseq.structseqfield(1) - ru_maxrss = _structseq.structseqfield(2) - ru_ixrss = _structseq.structseqfield(3) - ru_idrss = _structseq.structseqfield(4) - ru_isrss = _structseq.structseqfield(5) - ru_minflt = _structseq.structseqfield(6) - ru_majflt = _structseq.structseqfield(7) - ru_nswap = _structseq.structseqfield(8) - ru_inblock = _structseq.structseqfield(9) - ru_oublock = _structseq.structseqfield(10) - ru_msgsnd = _structseq.structseqfield(11) - ru_msgrcv = _structseq.structseqfield(12) - ru_nsignals = _structseq.structseqfield(13) - ru_nvcsw = _structseq.structseqfield(14) - ru_nivcsw = _structseq.structseqfield(15) + ru_utime = _structseq.structseqfield(0, "user time used") + ru_stime = _structseq.structseqfield(1, "system time used") + ru_maxrss = _structseq.structseqfield(2, "max. 
resident set size") + ru_ixrss = _structseq.structseqfield(3, "shared memory size") + ru_idrss = _structseq.structseqfield(4, "unshared data size") + ru_isrss = _structseq.structseqfield(5, "unshared stack size") + ru_minflt = _structseq.structseqfield(6, "page faults not requiring I/O") + ru_majflt = _structseq.structseqfield(7, "page faults requiring I/O") + ru_nswap = _structseq.structseqfield(8, "number of swap outs") + ru_inblock = _structseq.structseqfield(9, "block input operations") + ru_oublock = _structseq.structseqfield(10, "block output operations") + ru_msgsnd = _structseq.structseqfield(11, "IPC messages sent") + ru_msgrcv = _structseq.structseqfield(12, "IPC messages received") + ru_nsignals = _structseq.structseqfield(13,"signals received") + ru_nvcsw = _structseq.structseqfield(14, "voluntary context switches") + ru_nivcsw = _structseq.structseqfield(15, "involuntary context switches") - at builtinify -def rlimit_check_bounds(rlim_cur, rlim_max): - if rlim_cur > rlim_t_max: - raise ValueError("%d does not fit into rlim_t" % rlim_cur) - if rlim_max > rlim_t_max: - raise ValueError("%d does not fit into rlim_t" % rlim_max) - -class rlimit(Structure): - _fields_ = ( - ("rlim_cur", rlim_t), - ("rlim_max", rlim_t), - ) - -_getrlimit.argtypes = (c_int, POINTER(rlimit)) -_getrlimit.restype = c_int -_setrlimit.argtypes = (c_int, POINTER(rlimit)) -_setrlimit.restype = c_int - - - at builtinify -def getrusage(who): - ru = _struct_rusage() - ret = _getrusage(who, byref(ru)) - if ret == -1: - errno = get_errno() - if errno == EINVAL: - raise ValueError("invalid who parameter") - raise error(errno) +def _make_struct_rusage(ru): return struct_rusage(( - float(ru.ru_utime), - float(ru.ru_stime), + lib.my_utime(ru), + lib.my_stime(ru), ru.ru_maxrss, ru.ru_ixrss, ru.ru_idrss, @@ -135,48 +59,59 @@ )) @builtinify +def getrusage(who): + ru = ffi.new("struct rusage *") + if lib.getrusage(who, ru) == -1: + if ffi.errno == EINVAL: + raise ValueError("invalid who 
parameter") + raise error(ffi.errno) + return _make_struct_rusage(ru) + + at builtinify def getrlimit(resource): - if not(0 <= resource < RLIM_NLIMITS): + if not (0 <= resource < lib.RLIM_NLIMITS): return ValueError("invalid resource specified") - rlim = rlimit() - ret = _getrlimit(resource, byref(rlim)) - if ret == -1: - errno = get_errno() - raise error(errno) - return (rlim.rlim_cur, rlim.rlim_max) + result = ffi.new("long long[2]") + if lib.my_getrlimit(resource, result) == -1: + raise error(ffi.errno) + return (result[0], result[1]) @builtinify -def setrlimit(resource, rlim): - if not(0 <= resource < RLIM_NLIMITS): +def setrlimit(resource, limits): + if not (0 <= resource < lib.RLIM_NLIMITS): return ValueError("invalid resource specified") - rlimit_check_bounds(*rlim) - rlim = rlimit(rlim[0], rlim[1]) - ret = _setrlimit(resource, byref(rlim)) - if ret == -1: - errno = get_errno() - if errno == EINVAL: - return ValueError("current limit exceeds maximum limit") - elif errno == EPERM: - return ValueError("not allowed to raise maximum limit") + limits = tuple(limits) + if len(limits) != 2: + raise ValueError("expected a tuple of 2 integers") + + if lib.my_setrlimit(resource, limits[0], limits[1]) == -1: + if ffi.errno == EINVAL: + raise ValueError("current limit exceeds maximum limit") + elif ffi.errno == EPERM: + raise ValueError("not allowed to raise maximum limit") else: - raise error(errno) + raise error(ffi.errno) + @builtinify def getpagesize(): - if _getpagesize: - return _getpagesize() - else: - try: - return sysconf("SC_PAGE_SIZE") - except ValueError: - # Irix 5.3 has _SC_PAGESIZE, but not _SC_PAGE_SIZE - return sysconf("SC_PAGESIZE") + return os.sysconf("SC_PAGESIZE") -__all__ = ALL_CONSTANTS + ( - 'error', 'timeval', 'struct_rusage', 'rlimit', - 'getrusage', 'getrlimit', 'setrlimit', 'getpagesize', + +def _setup(): + all_constants = [] + p = lib.my_rlimit_consts + while p.name: + name = ffi.string(p.name) + globals()[name] = int(p.value) + 
all_constants.append(name) + p += 1 + return all_constants + +__all__ = tuple(_setup()) + ( + 'error', 'getpagesize', 'struct_rusage', + 'getrusage', 'getrlimit', 'setrlimit', ) - -del ALL_CONSTANTS +del _setup diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -81,13 +81,13 @@ Simon Burton Martin Matusiak Konstantin Lopuhin + Stefano Rivera Wenzhu Man John Witulski Laurence Tratt Ivan Sichmann Freitas Greg Price Dario Bertini - Stefano Rivera Mark Pearse Simon Cross Andreas Stührk @@ -95,9 +95,10 @@ Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Spenser Bauman Jeremy Thurgood Paweł Piotr Przeradowski - Spenser Bauman + Tobias Pape Paul deGrandis Ilya Osadchiy marky1991 @@ -109,7 +110,7 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Tobias Pape + Mark Young Wanja Saatkamp Gerald Klix Mike Blume @@ -140,9 +141,9 @@ Yichao Yu Rocco Moretti Gintautas Miliauskas + Devin Jeanpierre Michael Twomey Lucian Branescu Mihaila - Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -153,6 +154,7 @@ Victor Stinner Andrews Medina anatoly techtonik + Sergey Matyunin Stuart Williams Jasper Schulz Christian Hudon @@ -187,7 +189,6 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan - Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -195,7 +196,9 @@ Pieter Zieschang Gabriel Lukas Vacek + Kunal Grover Andrew Dalke + Florin Papa Sylvain Thenault Jakub Stasiak Nathan Taylor @@ -210,7 +213,6 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner - Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -222,9 +224,11 @@ Artur Lisiecki Sergey Kishchenko Ignas Mikalajunas + Alecsandru Patrascu Christoph Gerum Martin Blais Lene Wagner + Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su Toni Mattis @@ -261,6 +265,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann + florinpapa Rafał Gałczyński Matt Bogosian Christian Muirhead @@ -275,6 +280,7 @@ Boglarka Vezer Chris Pressey 
Buck Golemon + Diana Popa Konrad Delong Dinu Gherman Chris Lambacher diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -2,12 +2,11 @@ PyPy 5.1 ======== -We have released PyPy 5.1, about two months after PyPy 5.0.1. +We have released PyPy 5.1, about a month after PyPy 5.0. We encourage all users of PyPy to update to this version. Apart from the usual bug fixes, there is an ongoing effort to improve the warmup time and memory -usage of JIT-related metadata. - -We now fully support the IBM s390x architecture. +usage of JIT-related metadata, and we now fully support the IBM s390x +architecture. You can download the PyPy 5.1 release here: @@ -52,22 +51,46 @@ .. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. _`dynamic languages`: http://pypyjs.org -Other Highlights (since 5.0.1 released in Febuary 2015) +Other Highlights (since 5.0 released in March 2015) ========================================================= * New features: - * + * A new jit backend for the IBM s390x, which was a large effort over the past + few months. 
- * + * Add better support for PyUnicodeObject in the C-API compatibility layer - * + * Support GNU/kFreeBSD Debian ports in vmprof + + * Add __pypy__._promote + + * Make attrgetter a single type for CPython compatibility * Bug Fixes - * + * Catch exceptions raised in an exit function - * + * Fix a corner case in the JIT + + * Fix edge cases in the cpyext refcounting-compatible semantics + + * Try harder to not emit NEON instructions on ARM processors without NEON + support + + * Improve the rpython posix module system interaction function calls + + * Detect a missing class function implementation instead of calling a random + function + + * Check that PyTupleObjects do not contain any NULLs at the + point of conversion to W_TupleObjects + + * In ctypes, fix _anonymous_ fields of instances + + * Fix JIT issue with unpack() on a Trace which contains half-written operations + + * Fix sandbox startup (a regression in 5.0) * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at @@ -75,21 +98,32 @@ * Numpy: - * + * Implemented numpy.where for a single argument - * + * Indexing by a numpy scalar now returns a scalar + + * Fix transpose(arg) when arg is a sequence + + * Refactor include file handling, now all numpy ndarray, ufunc, and umath + functions exported from libpypy.so are declared in pypy_numpy.h, which is + included only when building our fork of numpy * Performance improvements: - * + * Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting - * + * Merge another round of improvements to the warmup performance + + * Cleanup history rewriting in pyjitpl + + * Remove the forced minor collection that occurs when rewriting the + assembler at the start of the JIT backend * Internal refactorings: - * + * Use a simpler logger to speed up translation - * + * Drop vestiges of Python 2.5 support in testing .. 
_resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html .. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,3 +5,12 @@ .. this is a revision shortly after release-5.1 .. startrev: 2180e1eaf6f6 +.. branch: rposix-for-3 + +Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). +This updates the underlying rpython functions with the ones needed for the +py3k branch + +.. branch: numpy_broadcast + +Add broadcast to micronumpy diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -340,10 +340,6 @@ return PyPyJitPolicy(pypy_hooks) def get_entry_point(self, config): - from pypy.tool.lib_pypy import import_from_lib_pypy - rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild') - rebuild.try_rebuild() - space = make_objspace(config) # manually imports app_main.py diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -54,24 +54,24 @@ n = self.root_node if n.type == syms.file_input: stmts = [] - for i in range(len(n.children) - 1): - stmt = n.children[i] + for i in range(n.num_children() - 1): + stmt = n.get_child(i) if stmt.type == tokens.NEWLINE: continue sub_stmts_count = self.number_of_statements(stmt) if sub_stmts_count == 1: stmts.append(self.handle_stmt(stmt)) else: - stmt = stmt.children[0] + stmt = stmt.get_child(0) for j in range(sub_stmts_count): - small_stmt = stmt.children[j * 2] + small_stmt = stmt.get_child(j * 2) stmts.append(self.handle_stmt(small_stmt)) return ast.Module(stmts) elif n.type == syms.eval_input: - body = self.handle_testlist(n.children[0]) + body = self.handle_testlist(n.get_child(0)) 
return ast.Expression(body) elif n.type == syms.single_input: - first_child = n.children[0] + first_child = n.get_child(0) if first_child.type == tokens.NEWLINE: # An empty line. return ast.Interactive([]) @@ -81,8 +81,8 @@ stmts = [self.handle_stmt(first_child)] else: stmts = [] - for i in range(0, len(first_child.children), 2): - stmt = first_child.children[i] + for i in range(0, first_child.num_children(), 2): + stmt = first_child.get_child(i) if stmt.type == tokens.NEWLINE: break stmts.append(self.handle_stmt(stmt)) @@ -96,16 +96,16 @@ if stmt_type == syms.compound_stmt: return 1 elif stmt_type == syms.stmt: - return self.number_of_statements(n.children[0]) + return self.number_of_statements(n.get_child(0)) elif stmt_type == syms.simple_stmt: # Divide to remove semi-colons. - return len(n.children) // 2 + return n.num_children() // 2 else: raise AssertionError("non-statement node") def error(self, msg, n): """Raise a SyntaxError with the lineno and column set to n's.""" - raise SyntaxError(msg, n.lineno, n.column, + raise SyntaxError(msg, n.get_lineno(), n.get_column(), filename=self.compile_info.filename) def error_ast(self, msg, ast_node): @@ -132,51 +132,51 @@ expressions = None newline = True start = 1 - child_count = len(print_node.children) - if child_count > 2 and print_node.children[1].type == tokens.RIGHTSHIFT: - dest = self.handle_expr(print_node.children[2]) + child_count = print_node.num_children() + if child_count > 2 and print_node.get_child(1).type == tokens.RIGHTSHIFT: + dest = self.handle_expr(print_node.get_child(2)) start = 4 if (child_count + 1 - start) // 2: - expressions = [self.handle_expr(print_node.children[i]) + expressions = [self.handle_expr(print_node.get_child(i)) for i in range(start, child_count, 2)] - if print_node.children[-1].type == tokens.COMMA: + if print_node.get_child(-1).type == tokens.COMMA: newline = False - return ast.Print(dest, expressions, newline, print_node.lineno, - print_node.column) + return ast.Print(dest, 
expressions, newline, print_node.get_lineno(), + print_node.get_column()) def handle_del_stmt(self, del_node): - targets = self.handle_exprlist(del_node.children[1], ast.Del) - return ast.Delete(targets, del_node.lineno, del_node.column) + targets = self.handle_exprlist(del_node.get_child(1), ast.Del) + return ast.Delete(targets, del_node.get_lineno(), del_node.get_column()) def handle_flow_stmt(self, flow_node): - first_child = flow_node.children[0] + first_child = flow_node.get_child(0) first_child_type = first_child.type if first_child_type == syms.break_stmt: - return ast.Break(flow_node.lineno, flow_node.column) + return ast.Break(flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.continue_stmt: - return ast.Continue(flow_node.lineno, flow_node.column) + return ast.Continue(flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.yield_stmt: - yield_expr = self.handle_expr(first_child.children[0]) - return ast.Expr(yield_expr, flow_node.lineno, flow_node.column) + yield_expr = self.handle_expr(first_child.get_child(0)) + return ast.Expr(yield_expr, flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.return_stmt: - if len(first_child.children) == 1: + if first_child.num_children() == 1: values = None else: - values = self.handle_testlist(first_child.children[1]) - return ast.Return(values, flow_node.lineno, flow_node.column) + values = self.handle_testlist(first_child.get_child(1)) + return ast.Return(values, flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.raise_stmt: exc = None value = None traceback = None - child_count = len(first_child.children) + child_count = first_child.num_children() if child_count >= 2: - exc = self.handle_expr(first_child.children[1]) + exc = self.handle_expr(first_child.get_child(1)) if child_count >= 4: - value = self.handle_expr(first_child.children[3]) + value = self.handle_expr(first_child.get_child(3)) if child_count == 6: - 
traceback = self.handle_expr(first_child.children[5]) - return ast.Raise(exc, value, traceback, flow_node.lineno, - flow_node.column) + traceback = self.handle_expr(first_child.get_child(5)) + return ast.Raise(exc, value, traceback, flow_node.get_lineno(), + flow_node.get_column()) else: raise AssertionError("unknown flow statement") @@ -184,32 +184,32 @@ while True: import_name_type = import_name.type if import_name_type == syms.import_as_name: - name = import_name.children[0].value - if len(import_name.children) == 3: - as_name = import_name.children[2].value - self.check_forbidden_name(as_name, import_name.children[2]) + name = import_name.get_child(0).get_value() + if import_name.num_children() == 3: + as_name = import_name.get_child(2).get_value() + self.check_forbidden_name(as_name, import_name.get_child(2)) else: as_name = None - self.check_forbidden_name(name, import_name.children[0]) + self.check_forbidden_name(name, import_name.get_child(0)) return ast.alias(name, as_name) elif import_name_type == syms.dotted_as_name: - if len(import_name.children) == 1: - import_name = import_name.children[0] + if import_name.num_children() == 1: + import_name = import_name.get_child(0) continue - alias = self.alias_for_import_name(import_name.children[0], + alias = self.alias_for_import_name(import_name.get_child(0), store=False) - asname_node = import_name.children[2] - alias.asname = asname_node.value + asname_node = import_name.get_child(2) + alias.asname = asname_node.get_value() self.check_forbidden_name(alias.asname, asname_node) return alias elif import_name_type == syms.dotted_name: - if len(import_name.children) == 1: - name = import_name.children[0].value + if import_name.num_children() == 1: + name = import_name.get_child(0).get_value() if store: - self.check_forbidden_name(name, import_name.children[0]) + self.check_forbidden_name(name, import_name.get_child(0)) return ast.alias(name, None) - name_parts = [import_name.children[i].value - for i in range(0, 
len(import_name.children), 2)] + name_parts = [import_name.get_child(i).get_value() + for i in range(0, import_name.num_children(), 2)] name = ".".join(name_parts) return ast.alias(name, None) elif import_name_type == tokens.STAR: @@ -218,20 +218,20 @@ raise AssertionError("unknown import name") def handle_import_stmt(self, import_node): - import_node = import_node.children[0] + import_node = import_node.get_child(0) if import_node.type == syms.import_name: - dotted_as_names = import_node.children[1] - aliases = [self.alias_for_import_name(dotted_as_names.children[i]) - for i in range(0, len(dotted_as_names.children), 2)] - return ast.Import(aliases, import_node.lineno, import_node.column) + dotted_as_names = import_node.get_child(1) + aliases = [self.alias_for_import_name(dotted_as_names.get_child(i)) + for i in range(0, dotted_as_names.num_children(), 2)] + return ast.Import(aliases, import_node.get_lineno(), import_node.get_column()) elif import_node.type == syms.import_from: - child_count = len(import_node.children) + child_count = import_node.num_children() module = None modname = None i = 1 dot_count = 0 while i < child_count: - child = import_node.children[i] + child = import_node.get_child(i) if child.type == syms.dotted_name: module = self.alias_for_import_name(child, False) i += 1 @@ -241,16 +241,16 @@ i += 1 dot_count += 1 i += 1 - after_import_type = import_node.children[i].type + after_import_type = import_node.get_child(i).type star_import = False if after_import_type == tokens.STAR: - names_node = import_node.children[i] + names_node = import_node.get_child(i) star_import = True elif after_import_type == tokens.LPAR: - names_node = import_node.children[i + 1] + names_node = import_node.get_child(i + 1) elif after_import_type == syms.import_as_names: - names_node = import_node.children[i] - if len(names_node.children) % 2 == 0: + names_node = import_node.get_child(i) + if names_node.num_children() % 2 == 0: self.error("trailing comma is only allowed 
with " "surronding parenthesis", names_node) else: @@ -258,25 +258,25 @@ if star_import: aliases = [self.alias_for_import_name(names_node)] else: - aliases = [self.alias_for_import_name(names_node.children[i]) - for i in range(0, len(names_node.children), 2)] + aliases = [self.alias_for_import_name(names_node.get_child(i)) + for i in range(0, names_node.num_children(), 2)] if module is not None: modname = module.name return ast.ImportFrom(modname, aliases, dot_count, - import_node.lineno, import_node.column) + import_node.get_lineno(), import_node.get_column()) else: raise AssertionError("unknown import node") def handle_global_stmt(self, global_node): - names = [global_node.children[i].value - for i in range(1, len(global_node.children), 2)] - return ast.Global(names, global_node.lineno, global_node.column) + names = [global_node.get_child(i).get_value() + for i in range(1, global_node.num_children(), 2)] + return ast.Global(names, global_node.get_lineno(), global_node.get_column()) def handle_exec_stmt(self, exec_node): - child_count = len(exec_node.children) + child_count = exec_node.num_children() globs = None locs = None - to_execute = self.handle_expr(exec_node.children[1]) + to_execute = self.handle_expr(exec_node.get_child(1)) if child_count < 4: if isinstance(to_execute, ast.Tuple) and \ (len(to_execute.elts) == 2 or len(to_execute.elts) == 3): @@ -285,272 +285,273 @@ locs = to_execute.elts[2] to_execute = to_execute.elts[0] elif child_count >= 4: - globs = self.handle_expr(exec_node.children[3]) + globs = self.handle_expr(exec_node.get_child(3)) if child_count == 6: - locs = self.handle_expr(exec_node.children[5]) - return ast.Exec(to_execute, globs, locs, exec_node.lineno, - exec_node.column) + locs = self.handle_expr(exec_node.get_child(5)) + return ast.Exec(to_execute, globs, locs, exec_node.get_lineno(), + exec_node.get_column()) def handle_assert_stmt(self, assert_node): - expr = self.handle_expr(assert_node.children[1]) + expr = 
self.handle_expr(assert_node.get_child(1)) msg = None - if len(assert_node.children) == 4: - msg = self.handle_expr(assert_node.children[3]) - return ast.Assert(expr, msg, assert_node.lineno, assert_node.column) + if assert_node.num_children() == 4: + msg = self.handle_expr(assert_node.get_child(3)) + return ast.Assert(expr, msg, assert_node.get_lineno(), assert_node.get_column()) def handle_suite(self, suite_node): - first_child = suite_node.children[0] + first_child = suite_node.get_child(0) if first_child.type == syms.simple_stmt: - end = len(first_child.children) - 1 - if first_child.children[end - 1].type == tokens.SEMI: + end = first_child.num_children() - 1 + if first_child.get_child(end - 1).type == tokens.SEMI: end -= 1 - stmts = [self.handle_stmt(first_child.children[i]) + stmts = [self.handle_stmt(first_child.get_child(i)) for i in range(0, end, 2)] else: stmts = [] - for i in range(2, len(suite_node.children) - 1): - stmt = suite_node.children[i] + for i in range(2, suite_node.num_children() - 1): + stmt = suite_node.get_child(i) stmt_count = self.number_of_statements(stmt) if stmt_count == 1: stmts.append(self.handle_stmt(stmt)) else: - simple_stmt = stmt.children[0] - for j in range(0, len(simple_stmt.children), 2): - stmt = simple_stmt.children[j] - if not stmt.children: + simple_stmt = stmt.get_child(0) + for j in range(0, simple_stmt.num_children(), 2): + stmt = simple_stmt.get_child(j) + if not stmt.num_children(): break stmts.append(self.handle_stmt(stmt)) return stmts def handle_if_stmt(self, if_node): - child_count = len(if_node.children) + child_count = if_node.num_children() if child_count == 4: - test = self.handle_expr(if_node.children[1]) - suite = self.handle_suite(if_node.children[3]) - return ast.If(test, suite, None, if_node.lineno, if_node.column) - otherwise_string = if_node.children[4].value + test = self.handle_expr(if_node.get_child(1)) + suite = self.handle_suite(if_node.get_child(3)) + return ast.If(test, suite, None, 
if_node.get_lineno(), if_node.get_column()) + otherwise_string = if_node.get_child(4).get_value() if otherwise_string == "else": - test = self.handle_expr(if_node.children[1]) - suite = self.handle_suite(if_node.children[3]) - else_suite = self.handle_suite(if_node.children[6]) - return ast.If(test, suite, else_suite, if_node.lineno, - if_node.column) + test = self.handle_expr(if_node.get_child(1)) + suite = self.handle_suite(if_node.get_child(3)) + else_suite = self.handle_suite(if_node.get_child(6)) + return ast.If(test, suite, else_suite, if_node.get_lineno(), + if_node.get_column()) elif otherwise_string == "elif": elif_count = child_count - 4 - after_elif = if_node.children[elif_count + 1] + after_elif = if_node.get_child(elif_count + 1) if after_elif.type == tokens.NAME and \ - after_elif.value == "else": + after_elif.get_value() == "else": has_else = True elif_count -= 3 else: has_else = False elif_count /= 4 if has_else: - last_elif = if_node.children[-6] + last_elif = if_node.get_child(-6) last_elif_test = self.handle_expr(last_elif) - elif_body = self.handle_suite(if_node.children[-4]) - else_body = self.handle_suite(if_node.children[-1]) + elif_body = self.handle_suite(if_node.get_child(-4)) + else_body = self.handle_suite(if_node.get_child(-1)) otherwise = [ast.If(last_elif_test, elif_body, else_body, - last_elif.lineno, last_elif.column)] + last_elif.get_lineno(), last_elif.get_column())] elif_count -= 1 else: otherwise = None for i in range(elif_count): offset = 5 + (elif_count - i - 1) * 4 - elif_test_node = if_node.children[offset] + elif_test_node = if_node.get_child(offset) elif_test = self.handle_expr(elif_test_node) - elif_body = self.handle_suite(if_node.children[offset + 2]) + elif_body = self.handle_suite(if_node.get_child(offset + 2)) new_if = ast.If(elif_test, elif_body, otherwise, - elif_test_node.lineno, elif_test_node.column) + elif_test_node.get_lineno(), elif_test_node.get_column()) otherwise = [new_if] - expr = 
self.handle_expr(if_node.children[1]) - body = self.handle_suite(if_node.children[3]) - return ast.If(expr, body, otherwise, if_node.lineno, if_node.column) + expr = self.handle_expr(if_node.get_child(1)) + body = self.handle_suite(if_node.get_child(3)) + return ast.If(expr, body, otherwise, if_node.get_lineno(), if_node.get_column()) else: raise AssertionError("unknown if statement configuration") def handle_while_stmt(self, while_node): - loop_test = self.handle_expr(while_node.children[1]) - body = self.handle_suite(while_node.children[3]) - if len(while_node.children) == 7: - otherwise = self.handle_suite(while_node.children[6]) + loop_test = self.handle_expr(while_node.get_child(1)) + body = self.handle_suite(while_node.get_child(3)) + if while_node.num_children() == 7: + otherwise = self.handle_suite(while_node.get_child(6)) else: otherwise = None - return ast.While(loop_test, body, otherwise, while_node.lineno, - while_node.column) + return ast.While(loop_test, body, otherwise, while_node.get_lineno(), + while_node.get_column()) def handle_for_stmt(self, for_node): - target_node = for_node.children[1] + target_node = for_node.get_child(1) target_as_exprlist = self.handle_exprlist(target_node, ast.Store) - if len(target_node.children) == 1: + if target_node.num_children() == 1: target = target_as_exprlist[0] else: target = ast.Tuple(target_as_exprlist, ast.Store, - target_node.lineno, target_node.column) - expr = self.handle_testlist(for_node.children[3]) - body = self.handle_suite(for_node.children[5]) - if len(for_node.children) == 9: - otherwise = self.handle_suite(for_node.children[8]) + target_node.get_lineno(), target_node.get_column()) + expr = self.handle_testlist(for_node.get_child(3)) + body = self.handle_suite(for_node.get_child(5)) + if for_node.num_children() == 9: + otherwise = self.handle_suite(for_node.get_child(8)) else: otherwise = None - return ast.For(target, expr, body, otherwise, for_node.lineno, - for_node.column) + return 
ast.For(target, expr, body, otherwise, for_node.get_lineno(), + for_node.get_column()) def handle_except_clause(self, exc, body): test = None target = None suite = self.handle_suite(body) - child_count = len(exc.children) + child_count = exc.num_children() if child_count >= 2: - test = self.handle_expr(exc.children[1]) + test = self.handle_expr(exc.get_child(1)) if child_count == 4: - target_child = exc.children[3] + target_child = exc.get_child(3) target = self.handle_expr(target_child) self.set_context(target, ast.Store) - return ast.ExceptHandler(test, target, suite, exc.lineno, exc.column) + return ast.ExceptHandler(test, target, suite, exc.get_lineno(), exc.get_column()) def handle_try_stmt(self, try_node): - body = self.handle_suite(try_node.children[2]) - child_count = len(try_node.children) + body = self.handle_suite(try_node.get_child(2)) + child_count = try_node.num_children() except_count = (child_count - 3 ) // 3 otherwise = None finally_suite = None - possible_extra_clause = try_node.children[-3] + possible_extra_clause = try_node.get_child(-3) if possible_extra_clause.type == tokens.NAME: - if possible_extra_clause.value == "finally": + if possible_extra_clause.get_value() == "finally": if child_count >= 9 and \ - try_node.children[-6].type == tokens.NAME: - otherwise = self.handle_suite(try_node.children[-4]) + try_node.get_child(-6).type == tokens.NAME: + otherwise = self.handle_suite(try_node.get_child(-4)) except_count -= 1 - finally_suite = self.handle_suite(try_node.children[-1]) + finally_suite = self.handle_suite(try_node.get_child(-1)) except_count -= 1 else: - otherwise = self.handle_suite(try_node.children[-1]) + otherwise = self.handle_suite(try_node.get_child(-1)) except_count -= 1 if except_count: handlers = [] for i in range(except_count): base_offset = i * 3 - exc = try_node.children[3 + base_offset] - except_body = try_node.children[5 + base_offset] + exc = try_node.get_child(3 + base_offset) + except_body = try_node.get_child(5 + 
base_offset) handlers.append(self.handle_except_clause(exc, except_body)) except_ast = ast.TryExcept(body, handlers, otherwise, - try_node.lineno, try_node.column) + try_node.get_lineno(), try_node.get_column()) if finally_suite is None: return except_ast body = [except_ast] - return ast.TryFinally(body, finally_suite, try_node.lineno, - try_node.column) + return ast.TryFinally(body, finally_suite, try_node.get_lineno(), + try_node.get_column()) def handle_with_stmt(self, with_node): - body = self.handle_suite(with_node.children[-1]) - i = len(with_node.children) - 1 + body = self.handle_suite(with_node.get_child(-1)) + i = with_node.num_children() - 1 while True: i -= 2 - item = with_node.children[i] - test = self.handle_expr(item.children[0]) - if len(item.children) == 3: - target = self.handle_expr(item.children[2]) + item = with_node.get_child(i) + test = self.handle_expr(item.get_child(0)) + if item.num_children() == 3: + target = self.handle_expr(item.get_child(2)) self.set_context(target, ast.Store) else: target = None - wi = ast.With(test, target, body, with_node.lineno, - with_node.column) + wi = ast.With(test, target, body, with_node.get_lineno(), + with_node.get_column()) if i == 1: break body = [wi] return wi def handle_classdef(self, classdef_node, decorators=None): - name_node = classdef_node.children[1] - name = name_node.value + name_node = classdef_node.get_child(1) + name = name_node.get_value() self.check_forbidden_name(name, name_node) - if len(classdef_node.children) == 4: - body = self.handle_suite(classdef_node.children[3]) + if classdef_node.num_children() == 4: + body = self.handle_suite(classdef_node.get_child(3)) return ast.ClassDef(name, None, body, decorators, - classdef_node.lineno, classdef_node.column) - if classdef_node.children[3].type == tokens.RPAR: - body = self.handle_suite(classdef_node.children[5]) + classdef_node.get_lineno(), classdef_node.get_column()) + if classdef_node.get_child(3).type == tokens.RPAR: + body = 
self.handle_suite(classdef_node.get_child(5)) return ast.ClassDef(name, None, body, decorators, - classdef_node.lineno, classdef_node.column) - bases = self.handle_class_bases(classdef_node.children[3]) - body = self.handle_suite(classdef_node.children[6]) - return ast.ClassDef(name, bases, body, decorators, classdef_node.lineno, - classdef_node.column) + classdef_node.get_lineno(), classdef_node.get_column()) + bases = self.handle_class_bases(classdef_node.get_child(3)) + body = self.handle_suite(classdef_node.get_child(6)) + return ast.ClassDef(name, bases, body, decorators, classdef_node.get_lineno(), + classdef_node.get_column()) def handle_class_bases(self, bases_node): - if len(bases_node.children) == 1: - return [self.handle_expr(bases_node.children[0])] + if bases_node.num_children() == 1: + return [self.handle_expr(bases_node.get_child(0))] return self.get_expression_list(bases_node) def handle_funcdef(self, funcdef_node, decorators=None): - name_node = funcdef_node.children[1] - name = name_node.value + name_node = funcdef_node.get_child(1) + name = name_node.get_value() self.check_forbidden_name(name, name_node) - args = self.handle_arguments(funcdef_node.children[2]) - body = self.handle_suite(funcdef_node.children[4]) + args = self.handle_arguments(funcdef_node.get_child(2)) + body = self.handle_suite(funcdef_node.get_child(4)) return ast.FunctionDef(name, args, body, decorators, - funcdef_node.lineno, funcdef_node.column) + funcdef_node.get_lineno(), funcdef_node.get_column()) def handle_decorated(self, decorated_node): - decorators = self.handle_decorators(decorated_node.children[0]) - definition = decorated_node.children[1] + decorators = self.handle_decorators(decorated_node.get_child(0)) + definition = decorated_node.get_child(1) if definition.type == syms.funcdef: node = self.handle_funcdef(definition, decorators) elif definition.type == syms.classdef: node = self.handle_classdef(definition, decorators) else: raise AssertionError("unkown 
decorated") - node.lineno = decorated_node.lineno - node.col_offset = decorated_node.column + node.lineno = decorated_node.get_lineno() + node.col_offset = decorated_node.get_column() return node def handle_decorators(self, decorators_node): - return [self.handle_decorator(dec) for dec in decorators_node.children] + return [self.handle_decorator(decorators_node.get_child(i)) + for i in range(decorators_node.num_children())] def handle_decorator(self, decorator_node): - dec_name = self.handle_dotted_name(decorator_node.children[1]) - if len(decorator_node.children) == 3: + dec_name = self.handle_dotted_name(decorator_node.get_child(1)) + if decorator_node.num_children() == 3: dec = dec_name - elif len(decorator_node.children) == 5: + elif decorator_node.num_children() == 5: dec = ast.Call(dec_name, None, None, None, None, - decorator_node.lineno, decorator_node.column) + decorator_node.get_lineno(), decorator_node.get_column()) else: - dec = self.handle_call(decorator_node.children[3], dec_name) + dec = self.handle_call(decorator_node.get_child(3), dec_name) return dec def handle_dotted_name(self, dotted_name_node): - base_value = dotted_name_node.children[0].value - name = ast.Name(base_value, ast.Load, dotted_name_node.lineno, - dotted_name_node.column) - for i in range(2, len(dotted_name_node.children), 2): - attr = dotted_name_node.children[i].value - name = ast.Attribute(name, attr, ast.Load, dotted_name_node.lineno, - dotted_name_node.column) + base_value = dotted_name_node.get_child(0).get_value() + name = ast.Name(base_value, ast.Load, dotted_name_node.get_lineno(), + dotted_name_node.get_column()) + for i in range(2, dotted_name_node.num_children(), 2): + attr = dotted_name_node.get_child(i).get_value() + name = ast.Attribute(name, attr, ast.Load, dotted_name_node.get_lineno(), + dotted_name_node.get_column()) return name From pypy.commits at gmail.com Tue Apr 19 09:23:49 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 19 Apr 2016 06:23:49 -0700 
(PDT) Subject: [pypy-commit] pypy cpyext-ext: merge default into branch Message-ID: <57163165.49f9c20a.ef746.18aa@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83771:4e15627bce95 Date: 2016-04-19 15:27 +0300 http://bitbucket.org/pypy/pypy/changeset/4e15627bce95/ Log: merge default into branch diff too long, truncating to 2000 out of 2165 lines diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -320,8 +320,7 @@ def __reduce_ex__(self, proto): return type(self), (list(self), self.maxlen) - def __hash__(self): - raise TypeError("deque objects are unhashable") + __hash__ = None def __copy__(self): return self.__class__(self, self.maxlen) diff --git a/lib_pypy/ctypes_config_cache/.empty b/lib_pypy/ctypes_config_cache/.empty new file mode 100644 --- /dev/null +++ b/lib_pypy/ctypes_config_cache/.empty @@ -0,0 +1,1 @@ +dummy file to allow old buildbot configuration to run diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -54,24 +54,24 @@ n = self.root_node if n.type == syms.file_input: stmts = [] - for i in range(len(n.children) - 1): - stmt = n.children[i] + for i in range(n.num_children() - 1): + stmt = n.get_child(i) if stmt.type == tokens.NEWLINE: continue sub_stmts_count = self.number_of_statements(stmt) if sub_stmts_count == 1: stmts.append(self.handle_stmt(stmt)) else: - stmt = stmt.children[0] + stmt = stmt.get_child(0) for j in range(sub_stmts_count): - small_stmt = stmt.children[j * 2] + small_stmt = stmt.get_child(j * 2) stmts.append(self.handle_stmt(small_stmt)) return ast.Module(stmts) elif n.type == syms.eval_input: - body = self.handle_testlist(n.children[0]) + body = self.handle_testlist(n.get_child(0)) return ast.Expression(body) elif n.type == syms.single_input: - first_child = n.children[0] + first_child = 
n.get_child(0) if first_child.type == tokens.NEWLINE: # An empty line. return ast.Interactive([]) @@ -81,8 +81,8 @@ stmts = [self.handle_stmt(first_child)] else: stmts = [] - for i in range(0, len(first_child.children), 2): - stmt = first_child.children[i] + for i in range(0, first_child.num_children(), 2): + stmt = first_child.get_child(i) if stmt.type == tokens.NEWLINE: break stmts.append(self.handle_stmt(stmt)) @@ -96,16 +96,16 @@ if stmt_type == syms.compound_stmt: return 1 elif stmt_type == syms.stmt: - return self.number_of_statements(n.children[0]) + return self.number_of_statements(n.get_child(0)) elif stmt_type == syms.simple_stmt: # Divide to remove semi-colons. - return len(n.children) // 2 + return n.num_children() // 2 else: raise AssertionError("non-statement node") def error(self, msg, n): """Raise a SyntaxError with the lineno and column set to n's.""" - raise SyntaxError(msg, n.lineno, n.column, + raise SyntaxError(msg, n.get_lineno(), n.get_column(), filename=self.compile_info.filename) def error_ast(self, msg, ast_node): @@ -132,51 +132,51 @@ expressions = None newline = True start = 1 - child_count = len(print_node.children) - if child_count > 2 and print_node.children[1].type == tokens.RIGHTSHIFT: - dest = self.handle_expr(print_node.children[2]) + child_count = print_node.num_children() + if child_count > 2 and print_node.get_child(1).type == tokens.RIGHTSHIFT: + dest = self.handle_expr(print_node.get_child(2)) start = 4 if (child_count + 1 - start) // 2: - expressions = [self.handle_expr(print_node.children[i]) + expressions = [self.handle_expr(print_node.get_child(i)) for i in range(start, child_count, 2)] - if print_node.children[-1].type == tokens.COMMA: + if print_node.get_child(-1).type == tokens.COMMA: newline = False - return ast.Print(dest, expressions, newline, print_node.lineno, - print_node.column) + return ast.Print(dest, expressions, newline, print_node.get_lineno(), + print_node.get_column()) def handle_del_stmt(self, del_node): 
- targets = self.handle_exprlist(del_node.children[1], ast.Del) - return ast.Delete(targets, del_node.lineno, del_node.column) + targets = self.handle_exprlist(del_node.get_child(1), ast.Del) + return ast.Delete(targets, del_node.get_lineno(), del_node.get_column()) def handle_flow_stmt(self, flow_node): - first_child = flow_node.children[0] + first_child = flow_node.get_child(0) first_child_type = first_child.type if first_child_type == syms.break_stmt: - return ast.Break(flow_node.lineno, flow_node.column) + return ast.Break(flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.continue_stmt: - return ast.Continue(flow_node.lineno, flow_node.column) + return ast.Continue(flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.yield_stmt: - yield_expr = self.handle_expr(first_child.children[0]) - return ast.Expr(yield_expr, flow_node.lineno, flow_node.column) + yield_expr = self.handle_expr(first_child.get_child(0)) + return ast.Expr(yield_expr, flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.return_stmt: - if len(first_child.children) == 1: + if first_child.num_children() == 1: values = None else: - values = self.handle_testlist(first_child.children[1]) - return ast.Return(values, flow_node.lineno, flow_node.column) + values = self.handle_testlist(first_child.get_child(1)) + return ast.Return(values, flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.raise_stmt: exc = None value = None traceback = None - child_count = len(first_child.children) + child_count = first_child.num_children() if child_count >= 2: - exc = self.handle_expr(first_child.children[1]) + exc = self.handle_expr(first_child.get_child(1)) if child_count >= 4: - value = self.handle_expr(first_child.children[3]) + value = self.handle_expr(first_child.get_child(3)) if child_count == 6: - traceback = self.handle_expr(first_child.children[5]) - return ast.Raise(exc, value, traceback, 
flow_node.lineno, - flow_node.column) + traceback = self.handle_expr(first_child.get_child(5)) + return ast.Raise(exc, value, traceback, flow_node.get_lineno(), + flow_node.get_column()) else: raise AssertionError("unknown flow statement") @@ -184,32 +184,32 @@ while True: import_name_type = import_name.type if import_name_type == syms.import_as_name: - name = import_name.children[0].value - if len(import_name.children) == 3: - as_name = import_name.children[2].value - self.check_forbidden_name(as_name, import_name.children[2]) + name = import_name.get_child(0).get_value() + if import_name.num_children() == 3: + as_name = import_name.get_child(2).get_value() + self.check_forbidden_name(as_name, import_name.get_child(2)) else: as_name = None - self.check_forbidden_name(name, import_name.children[0]) + self.check_forbidden_name(name, import_name.get_child(0)) return ast.alias(name, as_name) elif import_name_type == syms.dotted_as_name: - if len(import_name.children) == 1: - import_name = import_name.children[0] + if import_name.num_children() == 1: + import_name = import_name.get_child(0) continue - alias = self.alias_for_import_name(import_name.children[0], + alias = self.alias_for_import_name(import_name.get_child(0), store=False) - asname_node = import_name.children[2] - alias.asname = asname_node.value + asname_node = import_name.get_child(2) + alias.asname = asname_node.get_value() self.check_forbidden_name(alias.asname, asname_node) return alias elif import_name_type == syms.dotted_name: - if len(import_name.children) == 1: - name = import_name.children[0].value + if import_name.num_children() == 1: + name = import_name.get_child(0).get_value() if store: - self.check_forbidden_name(name, import_name.children[0]) + self.check_forbidden_name(name, import_name.get_child(0)) return ast.alias(name, None) - name_parts = [import_name.children[i].value - for i in range(0, len(import_name.children), 2)] + name_parts = [import_name.get_child(i).get_value() + for i in 
range(0, import_name.num_children(), 2)] name = ".".join(name_parts) return ast.alias(name, None) elif import_name_type == tokens.STAR: @@ -218,20 +218,20 @@ raise AssertionError("unknown import name") def handle_import_stmt(self, import_node): - import_node = import_node.children[0] + import_node = import_node.get_child(0) if import_node.type == syms.import_name: - dotted_as_names = import_node.children[1] - aliases = [self.alias_for_import_name(dotted_as_names.children[i]) - for i in range(0, len(dotted_as_names.children), 2)] - return ast.Import(aliases, import_node.lineno, import_node.column) + dotted_as_names = import_node.get_child(1) + aliases = [self.alias_for_import_name(dotted_as_names.get_child(i)) + for i in range(0, dotted_as_names.num_children(), 2)] + return ast.Import(aliases, import_node.get_lineno(), import_node.get_column()) elif import_node.type == syms.import_from: - child_count = len(import_node.children) + child_count = import_node.num_children() module = None modname = None i = 1 dot_count = 0 while i < child_count: - child = import_node.children[i] + child = import_node.get_child(i) if child.type == syms.dotted_name: module = self.alias_for_import_name(child, False) i += 1 @@ -241,16 +241,16 @@ i += 1 dot_count += 1 i += 1 - after_import_type = import_node.children[i].type + after_import_type = import_node.get_child(i).type star_import = False if after_import_type == tokens.STAR: - names_node = import_node.children[i] + names_node = import_node.get_child(i) star_import = True elif after_import_type == tokens.LPAR: - names_node = import_node.children[i + 1] + names_node = import_node.get_child(i + 1) elif after_import_type == syms.import_as_names: - names_node = import_node.children[i] - if len(names_node.children) % 2 == 0: + names_node = import_node.get_child(i) + if names_node.num_children() % 2 == 0: self.error("trailing comma is only allowed with " "surronding parenthesis", names_node) else: @@ -258,25 +258,25 @@ if star_import: aliases 
= [self.alias_for_import_name(names_node)] else: - aliases = [self.alias_for_import_name(names_node.children[i]) - for i in range(0, len(names_node.children), 2)] + aliases = [self.alias_for_import_name(names_node.get_child(i)) + for i in range(0, names_node.num_children(), 2)] if module is not None: modname = module.name return ast.ImportFrom(modname, aliases, dot_count, - import_node.lineno, import_node.column) + import_node.get_lineno(), import_node.get_column()) else: raise AssertionError("unknown import node") def handle_global_stmt(self, global_node): - names = [global_node.children[i].value - for i in range(1, len(global_node.children), 2)] - return ast.Global(names, global_node.lineno, global_node.column) + names = [global_node.get_child(i).get_value() + for i in range(1, global_node.num_children(), 2)] + return ast.Global(names, global_node.get_lineno(), global_node.get_column()) def handle_exec_stmt(self, exec_node): - child_count = len(exec_node.children) + child_count = exec_node.num_children() globs = None locs = None - to_execute = self.handle_expr(exec_node.children[1]) + to_execute = self.handle_expr(exec_node.get_child(1)) if child_count < 4: if isinstance(to_execute, ast.Tuple) and \ (len(to_execute.elts) == 2 or len(to_execute.elts) == 3): @@ -285,272 +285,273 @@ locs = to_execute.elts[2] to_execute = to_execute.elts[0] elif child_count >= 4: - globs = self.handle_expr(exec_node.children[3]) + globs = self.handle_expr(exec_node.get_child(3)) if child_count == 6: - locs = self.handle_expr(exec_node.children[5]) - return ast.Exec(to_execute, globs, locs, exec_node.lineno, - exec_node.column) + locs = self.handle_expr(exec_node.get_child(5)) + return ast.Exec(to_execute, globs, locs, exec_node.get_lineno(), + exec_node.get_column()) def handle_assert_stmt(self, assert_node): - expr = self.handle_expr(assert_node.children[1]) + expr = self.handle_expr(assert_node.get_child(1)) msg = None - if len(assert_node.children) == 4: - msg = 
self.handle_expr(assert_node.children[3]) - return ast.Assert(expr, msg, assert_node.lineno, assert_node.column) + if assert_node.num_children() == 4: + msg = self.handle_expr(assert_node.get_child(3)) + return ast.Assert(expr, msg, assert_node.get_lineno(), assert_node.get_column()) def handle_suite(self, suite_node): - first_child = suite_node.children[0] + first_child = suite_node.get_child(0) if first_child.type == syms.simple_stmt: - end = len(first_child.children) - 1 - if first_child.children[end - 1].type == tokens.SEMI: + end = first_child.num_children() - 1 + if first_child.get_child(end - 1).type == tokens.SEMI: end -= 1 - stmts = [self.handle_stmt(first_child.children[i]) + stmts = [self.handle_stmt(first_child.get_child(i)) for i in range(0, end, 2)] else: stmts = [] - for i in range(2, len(suite_node.children) - 1): - stmt = suite_node.children[i] + for i in range(2, suite_node.num_children() - 1): + stmt = suite_node.get_child(i) stmt_count = self.number_of_statements(stmt) if stmt_count == 1: stmts.append(self.handle_stmt(stmt)) else: - simple_stmt = stmt.children[0] - for j in range(0, len(simple_stmt.children), 2): - stmt = simple_stmt.children[j] - if not stmt.children: + simple_stmt = stmt.get_child(0) + for j in range(0, simple_stmt.num_children(), 2): + stmt = simple_stmt.get_child(j) + if not stmt.num_children(): break stmts.append(self.handle_stmt(stmt)) return stmts def handle_if_stmt(self, if_node): - child_count = len(if_node.children) + child_count = if_node.num_children() if child_count == 4: - test = self.handle_expr(if_node.children[1]) - suite = self.handle_suite(if_node.children[3]) - return ast.If(test, suite, None, if_node.lineno, if_node.column) - otherwise_string = if_node.children[4].value + test = self.handle_expr(if_node.get_child(1)) + suite = self.handle_suite(if_node.get_child(3)) + return ast.If(test, suite, None, if_node.get_lineno(), if_node.get_column()) + otherwise_string = if_node.get_child(4).get_value() if 
otherwise_string == "else": - test = self.handle_expr(if_node.children[1]) - suite = self.handle_suite(if_node.children[3]) - else_suite = self.handle_suite(if_node.children[6]) - return ast.If(test, suite, else_suite, if_node.lineno, - if_node.column) + test = self.handle_expr(if_node.get_child(1)) + suite = self.handle_suite(if_node.get_child(3)) + else_suite = self.handle_suite(if_node.get_child(6)) + return ast.If(test, suite, else_suite, if_node.get_lineno(), + if_node.get_column()) elif otherwise_string == "elif": elif_count = child_count - 4 - after_elif = if_node.children[elif_count + 1] + after_elif = if_node.get_child(elif_count + 1) if after_elif.type == tokens.NAME and \ - after_elif.value == "else": + after_elif.get_value() == "else": has_else = True elif_count -= 3 else: has_else = False elif_count /= 4 if has_else: - last_elif = if_node.children[-6] + last_elif = if_node.get_child(-6) last_elif_test = self.handle_expr(last_elif) - elif_body = self.handle_suite(if_node.children[-4]) - else_body = self.handle_suite(if_node.children[-1]) + elif_body = self.handle_suite(if_node.get_child(-4)) + else_body = self.handle_suite(if_node.get_child(-1)) otherwise = [ast.If(last_elif_test, elif_body, else_body, - last_elif.lineno, last_elif.column)] + last_elif.get_lineno(), last_elif.get_column())] elif_count -= 1 else: otherwise = None for i in range(elif_count): offset = 5 + (elif_count - i - 1) * 4 - elif_test_node = if_node.children[offset] + elif_test_node = if_node.get_child(offset) elif_test = self.handle_expr(elif_test_node) - elif_body = self.handle_suite(if_node.children[offset + 2]) + elif_body = self.handle_suite(if_node.get_child(offset + 2)) new_if = ast.If(elif_test, elif_body, otherwise, - elif_test_node.lineno, elif_test_node.column) + elif_test_node.get_lineno(), elif_test_node.get_column()) otherwise = [new_if] - expr = self.handle_expr(if_node.children[1]) - body = self.handle_suite(if_node.children[3]) - return ast.If(expr, body, otherwise, 
if_node.lineno, if_node.column) + expr = self.handle_expr(if_node.get_child(1)) + body = self.handle_suite(if_node.get_child(3)) + return ast.If(expr, body, otherwise, if_node.get_lineno(), if_node.get_column()) else: raise AssertionError("unknown if statement configuration") def handle_while_stmt(self, while_node): - loop_test = self.handle_expr(while_node.children[1]) - body = self.handle_suite(while_node.children[3]) - if len(while_node.children) == 7: - otherwise = self.handle_suite(while_node.children[6]) + loop_test = self.handle_expr(while_node.get_child(1)) + body = self.handle_suite(while_node.get_child(3)) + if while_node.num_children() == 7: + otherwise = self.handle_suite(while_node.get_child(6)) else: otherwise = None - return ast.While(loop_test, body, otherwise, while_node.lineno, - while_node.column) + return ast.While(loop_test, body, otherwise, while_node.get_lineno(), + while_node.get_column()) def handle_for_stmt(self, for_node): - target_node = for_node.children[1] + target_node = for_node.get_child(1) target_as_exprlist = self.handle_exprlist(target_node, ast.Store) - if len(target_node.children) == 1: + if target_node.num_children() == 1: target = target_as_exprlist[0] else: target = ast.Tuple(target_as_exprlist, ast.Store, - target_node.lineno, target_node.column) - expr = self.handle_testlist(for_node.children[3]) - body = self.handle_suite(for_node.children[5]) - if len(for_node.children) == 9: - otherwise = self.handle_suite(for_node.children[8]) + target_node.get_lineno(), target_node.get_column()) + expr = self.handle_testlist(for_node.get_child(3)) + body = self.handle_suite(for_node.get_child(5)) + if for_node.num_children() == 9: + otherwise = self.handle_suite(for_node.get_child(8)) else: otherwise = None - return ast.For(target, expr, body, otherwise, for_node.lineno, - for_node.column) + return ast.For(target, expr, body, otherwise, for_node.get_lineno(), + for_node.get_column()) def handle_except_clause(self, exc, body): test = 
None target = None suite = self.handle_suite(body) - child_count = len(exc.children) + child_count = exc.num_children() if child_count >= 2: - test = self.handle_expr(exc.children[1]) + test = self.handle_expr(exc.get_child(1)) if child_count == 4: - target_child = exc.children[3] + target_child = exc.get_child(3) target = self.handle_expr(target_child) self.set_context(target, ast.Store) - return ast.ExceptHandler(test, target, suite, exc.lineno, exc.column) + return ast.ExceptHandler(test, target, suite, exc.get_lineno(), exc.get_column()) def handle_try_stmt(self, try_node): - body = self.handle_suite(try_node.children[2]) - child_count = len(try_node.children) + body = self.handle_suite(try_node.get_child(2)) + child_count = try_node.num_children() except_count = (child_count - 3 ) // 3 otherwise = None finally_suite = None - possible_extra_clause = try_node.children[-3] + possible_extra_clause = try_node.get_child(-3) if possible_extra_clause.type == tokens.NAME: - if possible_extra_clause.value == "finally": + if possible_extra_clause.get_value() == "finally": if child_count >= 9 and \ - try_node.children[-6].type == tokens.NAME: - otherwise = self.handle_suite(try_node.children[-4]) + try_node.get_child(-6).type == tokens.NAME: + otherwise = self.handle_suite(try_node.get_child(-4)) except_count -= 1 - finally_suite = self.handle_suite(try_node.children[-1]) + finally_suite = self.handle_suite(try_node.get_child(-1)) except_count -= 1 else: - otherwise = self.handle_suite(try_node.children[-1]) + otherwise = self.handle_suite(try_node.get_child(-1)) except_count -= 1 if except_count: handlers = [] for i in range(except_count): base_offset = i * 3 - exc = try_node.children[3 + base_offset] - except_body = try_node.children[5 + base_offset] + exc = try_node.get_child(3 + base_offset) + except_body = try_node.get_child(5 + base_offset) handlers.append(self.handle_except_clause(exc, except_body)) except_ast = ast.TryExcept(body, handlers, otherwise, - 
try_node.lineno, try_node.column) + try_node.get_lineno(), try_node.get_column()) if finally_suite is None: return except_ast body = [except_ast] - return ast.TryFinally(body, finally_suite, try_node.lineno, - try_node.column) + return ast.TryFinally(body, finally_suite, try_node.get_lineno(), + try_node.get_column()) def handle_with_stmt(self, with_node): - body = self.handle_suite(with_node.children[-1]) - i = len(with_node.children) - 1 + body = self.handle_suite(with_node.get_child(-1)) + i = with_node.num_children() - 1 while True: i -= 2 - item = with_node.children[i] - test = self.handle_expr(item.children[0]) - if len(item.children) == 3: - target = self.handle_expr(item.children[2]) + item = with_node.get_child(i) + test = self.handle_expr(item.get_child(0)) + if item.num_children() == 3: + target = self.handle_expr(item.get_child(2)) self.set_context(target, ast.Store) else: target = None - wi = ast.With(test, target, body, with_node.lineno, - with_node.column) + wi = ast.With(test, target, body, with_node.get_lineno(), + with_node.get_column()) if i == 1: break body = [wi] return wi def handle_classdef(self, classdef_node, decorators=None): - name_node = classdef_node.children[1] - name = name_node.value + name_node = classdef_node.get_child(1) + name = name_node.get_value() self.check_forbidden_name(name, name_node) - if len(classdef_node.children) == 4: - body = self.handle_suite(classdef_node.children[3]) + if classdef_node.num_children() == 4: + body = self.handle_suite(classdef_node.get_child(3)) return ast.ClassDef(name, None, body, decorators, - classdef_node.lineno, classdef_node.column) - if classdef_node.children[3].type == tokens.RPAR: - body = self.handle_suite(classdef_node.children[5]) + classdef_node.get_lineno(), classdef_node.get_column()) + if classdef_node.get_child(3).type == tokens.RPAR: + body = self.handle_suite(classdef_node.get_child(5)) return ast.ClassDef(name, None, body, decorators, - classdef_node.lineno, 
classdef_node.column) - bases = self.handle_class_bases(classdef_node.children[3]) - body = self.handle_suite(classdef_node.children[6]) - return ast.ClassDef(name, bases, body, decorators, classdef_node.lineno, - classdef_node.column) + classdef_node.get_lineno(), classdef_node.get_column()) + bases = self.handle_class_bases(classdef_node.get_child(3)) + body = self.handle_suite(classdef_node.get_child(6)) + return ast.ClassDef(name, bases, body, decorators, classdef_node.get_lineno(), + classdef_node.get_column()) def handle_class_bases(self, bases_node): - if len(bases_node.children) == 1: - return [self.handle_expr(bases_node.children[0])] + if bases_node.num_children() == 1: + return [self.handle_expr(bases_node.get_child(0))] return self.get_expression_list(bases_node) def handle_funcdef(self, funcdef_node, decorators=None): - name_node = funcdef_node.children[1] - name = name_node.value + name_node = funcdef_node.get_child(1) + name = name_node.get_value() self.check_forbidden_name(name, name_node) - args = self.handle_arguments(funcdef_node.children[2]) - body = self.handle_suite(funcdef_node.children[4]) + args = self.handle_arguments(funcdef_node.get_child(2)) + body = self.handle_suite(funcdef_node.get_child(4)) return ast.FunctionDef(name, args, body, decorators, - funcdef_node.lineno, funcdef_node.column) + funcdef_node.get_lineno(), funcdef_node.get_column()) def handle_decorated(self, decorated_node): - decorators = self.handle_decorators(decorated_node.children[0]) - definition = decorated_node.children[1] + decorators = self.handle_decorators(decorated_node.get_child(0)) + definition = decorated_node.get_child(1) if definition.type == syms.funcdef: node = self.handle_funcdef(definition, decorators) elif definition.type == syms.classdef: node = self.handle_classdef(definition, decorators) else: raise AssertionError("unkown decorated") - node.lineno = decorated_node.lineno - node.col_offset = decorated_node.column + node.lineno = 
decorated_node.get_lineno() + node.col_offset = decorated_node.get_column() return node def handle_decorators(self, decorators_node): - return [self.handle_decorator(dec) for dec in decorators_node.children] + return [self.handle_decorator(decorators_node.get_child(i)) + for i in range(decorators_node.num_children())] def handle_decorator(self, decorator_node): - dec_name = self.handle_dotted_name(decorator_node.children[1]) - if len(decorator_node.children) == 3: + dec_name = self.handle_dotted_name(decorator_node.get_child(1)) + if decorator_node.num_children() == 3: dec = dec_name - elif len(decorator_node.children) == 5: + elif decorator_node.num_children() == 5: dec = ast.Call(dec_name, None, None, None, None, - decorator_node.lineno, decorator_node.column) + decorator_node.get_lineno(), decorator_node.get_column()) else: - dec = self.handle_call(decorator_node.children[3], dec_name) + dec = self.handle_call(decorator_node.get_child(3), dec_name) return dec def handle_dotted_name(self, dotted_name_node): - base_value = dotted_name_node.children[0].value - name = ast.Name(base_value, ast.Load, dotted_name_node.lineno, - dotted_name_node.column) - for i in range(2, len(dotted_name_node.children), 2): - attr = dotted_name_node.children[i].value - name = ast.Attribute(name, attr, ast.Load, dotted_name_node.lineno, - dotted_name_node.column) + base_value = dotted_name_node.get_child(0).get_value() + name = ast.Name(base_value, ast.Load, dotted_name_node.get_lineno(), + dotted_name_node.get_column()) + for i in range(2, dotted_name_node.num_children(), 2): + attr = dotted_name_node.get_child(i).get_value() + name = ast.Attribute(name, attr, ast.Load, dotted_name_node.get_lineno(), + dotted_name_node.get_column()) return name def handle_arguments(self, arguments_node): if arguments_node.type == syms.parameters: - if len(arguments_node.children) == 2: + if arguments_node.num_children() == 2: return ast.arguments(None, None, None, None) - arguments_node = 
arguments_node.children[1] + arguments_node = arguments_node.get_child(1) i = 0 - child_count = len(arguments_node.children) + child_count = arguments_node.num_children() defaults = [] args = [] variable_arg = None keywords_arg = None have_default = False while i < child_count: - argument = arguments_node.children[i] + argument = arguments_node.get_child(i) arg_type = argument.type if arg_type == syms.fpdef: parenthesized = False complex_args = False while True: if i + 1 < child_count and \ - arguments_node.children[i + 1].type == tokens.EQUAL: - default_node = arguments_node.children[i + 2] + arguments_node.get_child(i + 1).type == tokens.EQUAL: + default_node = arguments_node.get_child(i + 2) defaults.append(self.handle_expr(default_node)) i += 2 have_default = True @@ -561,32 +562,32 @@ msg = ("non-default argument follows default " "argument") self.error(msg, arguments_node) - if len(argument.children) == 3: - sub_arg = argument.children[1] - if len(sub_arg.children) != 1: + if argument.num_children() == 3: + sub_arg = argument.get_child(1) + if sub_arg.num_children() != 1: complex_args = True args.append(self.handle_arg_unpacking(sub_arg)) else: parenthesized = True - argument = sub_arg.children[0] + argument = sub_arg.get_child(0) continue - if argument.children[0].type == tokens.NAME: - name_node = argument.children[0] - arg_name = name_node.value + if argument.get_child(0).type == tokens.NAME: + name_node = argument.get_child(0) + arg_name = name_node.get_value() self.check_forbidden_name(arg_name, name_node) - name = ast.Name(arg_name, ast.Param, name_node.lineno, - name_node.column) + name = ast.Name(arg_name, ast.Param, name_node.get_lineno(), + name_node.get_column()) args.append(name) i += 2 break elif arg_type == tokens.STAR: - name_node = arguments_node.children[i + 1] - variable_arg = name_node.value + name_node = arguments_node.get_child(i + 1) + variable_arg = name_node.get_value() self.check_forbidden_name(variable_arg, name_node) i += 3 elif 
arg_type == tokens.DOUBLESTAR: - name_node = arguments_node.children[i + 1] - keywords_arg = name_node.value + name_node = arguments_node.get_child(i + 1) + keywords_arg = name_node.get_value() self.check_forbidden_name(keywords_arg, name_node) i += 3 else: @@ -599,35 +600,35 @@ def handle_arg_unpacking(self, fplist_node): args = [] - for i in range((len(fplist_node.children) + 1) / 2): - fpdef_node = fplist_node.children[i * 2] + for i in range((fplist_node.num_children() + 1) / 2): + fpdef_node = fplist_node.get_child(i * 2) while True: - child = fpdef_node.children[0] + child = fpdef_node.get_child(0) if child.type == tokens.NAME: - arg = ast.Name(child.value, ast.Store, child.lineno, - child.column) + arg = ast.Name(child.get_value(), ast.Store, child.get_lineno(), + child.get_column()) args.append(arg) else: - child = fpdef_node.children[1] - if len(child.children) == 1: - fpdef_node = child.children[0] + child = fpdef_node.get_child(1) + if child.num_children() == 1: + fpdef_node = child.get_child(0) continue args.append(self.handle_arg_unpacking(child)) break - tup = ast.Tuple(args, ast.Store, fplist_node.lineno, fplist_node.column) + tup = ast.Tuple(args, ast.Store, fplist_node.get_lineno(), fplist_node.get_column()) self.set_context(tup, ast.Store) return tup def handle_stmt(self, stmt): stmt_type = stmt.type if stmt_type == syms.stmt: - stmt = stmt.children[0] + stmt = stmt.get_child(0) stmt_type = stmt.type if stmt_type == syms.simple_stmt: - stmt = stmt.children[0] + stmt = stmt.get_child(0) stmt_type = stmt.type if stmt_type == syms.small_stmt: - stmt = stmt.children[0] + stmt = stmt.get_child(0) stmt_type = stmt.type if stmt_type == syms.expr_stmt: return self.handle_expr_stmt(stmt) @@ -636,7 +637,7 @@ elif stmt_type == syms.del_stmt: return self.handle_del_stmt(stmt) elif stmt_type == syms.pass_stmt: - return ast.Pass(stmt.lineno, stmt.column) + return ast.Pass(stmt.get_lineno(), stmt.get_column()) elif stmt_type == syms.flow_stmt: return 
self.handle_flow_stmt(stmt) elif stmt_type == syms.import_stmt: @@ -650,7 +651,7 @@ else: raise AssertionError("unhandled small statement") elif stmt_type == syms.compound_stmt: - stmt = stmt.children[0] + stmt = stmt.get_child(0) stmt_type = stmt.type if stmt_type == syms.if_stmt: return self.handle_if_stmt(stmt) @@ -674,113 +675,113 @@ raise AssertionError("unknown statment type") def handle_expr_stmt(self, stmt): - if len(stmt.children) == 1: - expression = self.handle_testlist(stmt.children[0]) - return ast.Expr(expression, stmt.lineno, stmt.column) - elif stmt.children[1].type == syms.augassign: + if stmt.num_children() == 1: + expression = self.handle_testlist(stmt.get_child(0)) + return ast.Expr(expression, stmt.get_lineno(), stmt.get_column()) + elif stmt.get_child(1).type == syms.augassign: # Augmented assignment. - target_child = stmt.children[0] + target_child = stmt.get_child(0) target_expr = self.handle_testlist(target_child) self.set_context(target_expr, ast.Store) - value_child = stmt.children[2] + value_child = stmt.get_child(2) if value_child.type == syms.testlist: value_expr = self.handle_testlist(value_child) else: value_expr = self.handle_expr(value_child) - op_str = stmt.children[1].children[0].value + op_str = stmt.get_child(1).get_child(0).get_value() operator = augassign_operator_map[op_str] return ast.AugAssign(target_expr, operator, value_expr, - stmt.lineno, stmt.column) + stmt.get_lineno(), stmt.get_column()) else: # Normal assignment. 
targets = [] - for i in range(0, len(stmt.children) - 2, 2): - target_node = stmt.children[i] + for i in range(0, stmt.num_children() - 2, 2): + target_node = stmt.get_child(i) if target_node.type == syms.yield_expr: self.error("can't assign to yield expression", target_node) target_expr = self.handle_testlist(target_node) self.set_context(target_expr, ast.Store) targets.append(target_expr) - value_child = stmt.children[-1] + value_child = stmt.get_child(-1) if value_child.type == syms.testlist: value_expr = self.handle_testlist(value_child) else: value_expr = self.handle_expr(value_child) - return ast.Assign(targets, value_expr, stmt.lineno, stmt.column) + return ast.Assign(targets, value_expr, stmt.get_lineno(), stmt.get_column()) def get_expression_list(self, tests): - return [self.handle_expr(tests.children[i]) - for i in range(0, len(tests.children), 2)] + return [self.handle_expr(tests.get_child(i)) + for i in range(0, tests.num_children(), 2)] def handle_testlist(self, tests): - if len(tests.children) == 1: - return self.handle_expr(tests.children[0]) + if tests.num_children() == 1: + return self.handle_expr(tests.get_child(0)) else: elts = self.get_expression_list(tests) - return ast.Tuple(elts, ast.Load, tests.lineno, tests.column) + return ast.Tuple(elts, ast.Load, tests.get_lineno(), tests.get_column()) def handle_expr(self, expr_node): # Loop until we return something. 
while True: expr_node_type = expr_node.type if expr_node_type == syms.test or expr_node_type == syms.old_test: - first_child = expr_node.children[0] + first_child = expr_node.get_child(0) if first_child.type in (syms.lambdef, syms.old_lambdef): return self.handle_lambdef(first_child) - elif len(expr_node.children) > 1: + elif expr_node.num_children() > 1: return self.handle_ifexp(expr_node) else: expr_node = first_child elif expr_node_type == syms.or_test or \ expr_node_type == syms.and_test: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue - seq = [self.handle_expr(expr_node.children[i]) - for i in range(0, len(expr_node.children), 2)] + seq = [self.handle_expr(expr_node.get_child(i)) + for i in range(0, expr_node.num_children(), 2)] if expr_node_type == syms.or_test: op = ast.Or else: op = ast.And - return ast.BoolOp(op, seq, expr_node.lineno, expr_node.column) + return ast.BoolOp(op, seq, expr_node.get_lineno(), expr_node.get_column()) elif expr_node_type == syms.not_test: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue - expr = self.handle_expr(expr_node.children[1]) - return ast.UnaryOp(ast.Not, expr, expr_node.lineno, - expr_node.column) + expr = self.handle_expr(expr_node.get_child(1)) + return ast.UnaryOp(ast.Not, expr, expr_node.get_lineno(), + expr_node.get_column()) elif expr_node_type == syms.comparison: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue operators = [] operands = [] - expr = self.handle_expr(expr_node.children[0]) - for i in range(1, len(expr_node.children), 2): - operators.append(self.handle_comp_op(expr_node.children[i])) - operands.append(self.handle_expr(expr_node.children[i + 1])) - return ast.Compare(expr, operators, 
operands, expr_node.lineno, - expr_node.column) + expr = self.handle_expr(expr_node.get_child(0)) + for i in range(1, expr_node.num_children(), 2): + operators.append(self.handle_comp_op(expr_node.get_child(i))) + operands.append(self.handle_expr(expr_node.get_child(i + 1))) + return ast.Compare(expr, operators, operands, expr_node.get_lineno(), + expr_node.get_column()) elif expr_node_type == syms.expr or \ expr_node_type == syms.xor_expr or \ expr_node_type == syms.and_expr or \ expr_node_type == syms.shift_expr or \ expr_node_type == syms.arith_expr or \ expr_node_type == syms.term: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue return self.handle_binop(expr_node) elif expr_node_type == syms.yield_expr: - if len(expr_node.children) == 2: - exp = self.handle_testlist(expr_node.children[1]) + if expr_node.num_children() == 2: + exp = self.handle_testlist(expr_node.get_child(1)) else: exp = None - return ast.Yield(exp, expr_node.lineno, expr_node.column) + return ast.Yield(exp, expr_node.get_lineno(), expr_node.get_column()) elif expr_node_type == syms.factor: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue return self.handle_factor(expr_node) elif expr_node_type == syms.power: @@ -789,24 +790,24 @@ raise AssertionError("unknown expr") def handle_lambdef(self, lambdef_node): - expr = self.handle_expr(lambdef_node.children[-1]) - if len(lambdef_node.children) == 3: + expr = self.handle_expr(lambdef_node.get_child(-1)) + if lambdef_node.num_children() == 3: args = ast.arguments(None, None, None, None) else: - args = self.handle_arguments(lambdef_node.children[1]) - return ast.Lambda(args, expr, lambdef_node.lineno, lambdef_node.column) + args = self.handle_arguments(lambdef_node.get_child(1)) + return ast.Lambda(args, expr, lambdef_node.get_lineno(), 
lambdef_node.get_column()) def handle_ifexp(self, if_expr_node): - body = self.handle_expr(if_expr_node.children[0]) - expression = self.handle_expr(if_expr_node.children[2]) - otherwise = self.handle_expr(if_expr_node.children[4]) - return ast.IfExp(expression, body, otherwise, if_expr_node.lineno, - if_expr_node.column) + body = self.handle_expr(if_expr_node.get_child(0)) + expression = self.handle_expr(if_expr_node.get_child(2)) + otherwise = self.handle_expr(if_expr_node.get_child(4)) + return ast.IfExp(expression, body, otherwise, if_expr_node.get_lineno(), + if_expr_node.get_column()) def handle_comp_op(self, comp_op_node): - comp_node = comp_op_node.children[0] + comp_node = comp_op_node.get_child(0) comp_type = comp_node.type - if len(comp_op_node.children) == 1: + if comp_op_node.num_children() == 1: if comp_type == tokens.LESS: return ast.Lt elif comp_type == tokens.GREATER: @@ -820,53 +821,55 @@ elif comp_type == tokens.NOTEQUAL: return ast.NotEq elif comp_type == tokens.NAME: - if comp_node.value == "is": + if comp_node.get_value() == "is": return ast.Is - elif comp_node.value == "in": + elif comp_node.get_value() == "in": return ast.In else: raise AssertionError("invalid comparison") else: raise AssertionError("invalid comparison") else: - if comp_op_node.children[1].value == "in": + if comp_op_node.get_child(1).get_value() == "in": return ast.NotIn - elif comp_node.value == "is": + elif comp_node.get_value() == "is": return ast.IsNot else: raise AssertionError("invalid comparison") def handle_binop(self, binop_node): - left = self.handle_expr(binop_node.children[0]) - right = self.handle_expr(binop_node.children[2]) - op = operator_map(binop_node.children[1].type) - result = ast.BinOp(left, op, right, binop_node.lineno, - binop_node.column) - number_of_ops = (len(binop_node.children) - 1) / 2 + left = self.handle_expr(binop_node.get_child(0)) + right = self.handle_expr(binop_node.get_child(2)) + op = operator_map(binop_node.get_child(1).type) + result 
= ast.BinOp(left, op, right, binop_node.get_lineno(), + binop_node.get_column()) + number_of_ops = (binop_node.num_children() - 1) / 2 for i in range(1, number_of_ops): - op_node = binop_node.children[i * 2 + 1] + op_node = binop_node.get_child(i * 2 + 1) op = operator_map(op_node.type) - sub_right = self.handle_expr(binop_node.children[i * 2 + 2]) - result = ast.BinOp(result, op, sub_right, op_node.lineno, - op_node.column) + sub_right = self.handle_expr(binop_node.get_child(i * 2 + 2)) + result = ast.BinOp(result, op, sub_right, op_node.get_lineno(), + op_node.get_column()) return result def handle_factor(self, factor_node): + from pypy.interpreter.pyparser.parser import Terminal # Fold '-' on constant numbers. - if factor_node.children[0].type == tokens.MINUS and \ - len(factor_node.children) == 2: - factor = factor_node.children[1] - if factor.type == syms.factor and len(factor.children) == 1: - power = factor.children[0] - if power.type == syms.power and len(power.children) == 1: - atom = power.children[0] + if factor_node.get_child(0).type == tokens.MINUS and \ + factor_node.num_children() == 2: + factor = factor_node.get_child(1) + if factor.type == syms.factor and factor.num_children() == 1: + power = factor.get_child(0) + if power.type == syms.power and power.num_children() == 1: + atom = power.get_child(0) if atom.type == syms.atom and \ - atom.children[0].type == tokens.NUMBER: - num = atom.children[0] - num.value = "-" + num.value + atom.get_child(0).type == tokens.NUMBER: + num = atom.get_child(0) + assert isinstance(num, Terminal) + num.value = "-" + num.get_value() return self.handle_atom(atom) - expr = self.handle_expr(factor_node.children[1]) - op_type = factor_node.children[0].type + expr = self.handle_expr(factor_node.get_child(1)) + op_type = factor_node.get_child(0).type if op_type == tokens.PLUS: op = ast.UAdd elif op_type == tokens.MINUS: @@ -875,31 +878,31 @@ op = ast.Invert else: raise AssertionError("invalid factor node") - return 
ast.UnaryOp(op, expr, factor_node.lineno, factor_node.column) + return ast.UnaryOp(op, expr, factor_node.get_lineno(), factor_node.get_column()) def handle_power(self, power_node): - atom_expr = self.handle_atom(power_node.children[0]) - if len(power_node.children) == 1: + atom_expr = self.handle_atom(power_node.get_child(0)) + if power_node.num_children() == 1: return atom_expr - for i in range(1, len(power_node.children)): - trailer = power_node.children[i] + for i in range(1, power_node.num_children()): + trailer = power_node.get_child(i) if trailer.type != syms.trailer: break tmp_atom_expr = self.handle_trailer(trailer, atom_expr) tmp_atom_expr.lineno = atom_expr.lineno tmp_atom_expr.col_offset = atom_expr.col_offset atom_expr = tmp_atom_expr - if power_node.children[-1].type == syms.factor: - right = self.handle_expr(power_node.children[-1]) - atom_expr = ast.BinOp(atom_expr, ast.Pow, right, power_node.lineno, - power_node.column) + if power_node.get_child(-1).type == syms.factor: + right = self.handle_expr(power_node.get_child(-1)) + atom_expr = ast.BinOp(atom_expr, ast.Pow, right, power_node.get_lineno(), + power_node.get_column()) return atom_expr def handle_slice(self, slice_node): - first_child = slice_node.children[0] + first_child = slice_node.get_child(0) if first_child.type == tokens.DOT: return ast.Ellipsis() - if len(slice_node.children) == 1 and first_child.type == syms.test: + if slice_node.num_children() == 1 and first_child.type == syms.test: index = self.handle_expr(first_child) return ast.Index(index) lower = None @@ -908,71 +911,72 @@ if first_child.type == syms.test: lower = self.handle_expr(first_child) if first_child.type == tokens.COLON: - if len(slice_node.children) > 1: - second_child = slice_node.children[1] + if slice_node.num_children() > 1: + second_child = slice_node.get_child(1) if second_child.type == syms.test: upper = self.handle_expr(second_child) - elif len(slice_node.children) > 2: - third_child = slice_node.children[2] + 
elif slice_node.num_children() > 2: + third_child = slice_node.get_child(2) if third_child.type == syms.test: upper = self.handle_expr(third_child) - last_child = slice_node.children[-1] + last_child = slice_node.get_child(-1) if last_child.type == syms.sliceop: - if len(last_child.children) == 1: - step = ast.Name("None", ast.Load, last_child.lineno, - last_child.column) + if last_child.num_children() == 1: + step = ast.Name("None", ast.Load, last_child.get_lineno(), + last_child.get_column()) else: - step_child = last_child.children[1] + step_child = last_child.get_child(1) if step_child.type == syms.test: step = self.handle_expr(step_child) return ast.Slice(lower, upper, step) def handle_trailer(self, trailer_node, left_expr): - first_child = trailer_node.children[0] + first_child = trailer_node.get_child(0) if first_child.type == tokens.LPAR: - if len(trailer_node.children) == 2: + if trailer_node.num_children() == 2: return ast.Call(left_expr, None, None, None, None, - trailer_node.lineno, trailer_node.column) + trailer_node.get_lineno(), trailer_node.get_column()) else: - return self.handle_call(trailer_node.children[1], left_expr) + return self.handle_call(trailer_node.get_child(1), left_expr) elif first_child.type == tokens.DOT: - attr = trailer_node.children[1].value + attr = trailer_node.get_child(1).get_value() return ast.Attribute(left_expr, attr, ast.Load, - trailer_node.lineno, trailer_node.column) + trailer_node.get_lineno(), trailer_node.get_column()) else: - middle = trailer_node.children[1] - if len(middle.children) == 1: - slice = self.handle_slice(middle.children[0]) + middle = trailer_node.get_child(1) + if middle.num_children() == 1: + slice = self.handle_slice(middle.get_child(0)) return ast.Subscript(left_expr, slice, ast.Load, - middle.lineno, middle.column) + middle.get_lineno(), middle.get_column()) slices = [] simple = True - for i in range(0, len(middle.children), 2): - slc = self.handle_slice(middle.children[i]) + for i in range(0, 
middle.num_children(), 2): + slc = self.handle_slice(middle.get_child(i)) if not isinstance(slc, ast.Index): simple = False slices.append(slc) if not simple: ext_slice = ast.ExtSlice(slices) return ast.Subscript(left_expr, ext_slice, ast.Load, - middle.lineno, middle.column) + middle.get_lineno(), middle.get_column()) elts = [] for idx in slices: assert isinstance(idx, ast.Index) elts.append(idx.value) - tup = ast.Tuple(elts, ast.Load, middle.lineno, middle.column) + tup = ast.Tuple(elts, ast.Load, middle.get_lineno(), middle.get_column()) return ast.Subscript(left_expr, ast.Index(tup), ast.Load, - middle.lineno, middle.column) + middle.get_lineno(), middle.get_column()) def handle_call(self, args_node, callable_expr): arg_count = 0 keyword_count = 0 generator_count = 0 - for argument in args_node.children: + for i in range(args_node.num_children()): + argument = args_node.get_child(i) if argument.type == syms.argument: - if len(argument.children) == 1: + if argument.num_children() == 1: arg_count += 1 - elif argument.children[1].type == syms.comp_for: + elif argument.get_child(1).type == syms.comp_for: generator_count += 1 else: keyword_count += 1 @@ -987,13 +991,13 @@ used_keywords = {} variable_arg = None keywords_arg = None - child_count = len(args_node.children) + child_count = args_node.num_children() i = 0 while i < child_count: - argument = args_node.children[i] + argument = args_node.get_child(i) if argument.type == syms.argument: - if len(argument.children) == 1: - expr_node = argument.children[0] + if argument.num_children() == 1: + expr_node = argument.get_child(0) if keywords: self.error("non-keyword arg after keyword arg", expr_node) @@ -1001,10 +1005,10 @@ self.error("only named arguments may follow " "*expression", expr_node) args.append(self.handle_expr(expr_node)) - elif argument.children[1].type == syms.comp_for: + elif argument.get_child(1).type == syms.comp_for: args.append(self.handle_genexp(argument)) else: - keyword_node = 
argument.children[0] + keyword_node = argument.get_child(0) keyword_expr = self.handle_expr(keyword_node) if isinstance(keyword_expr, ast.Lambda): self.error("lambda cannot contain assignment", @@ -1017,13 +1021,13 @@ self.error("keyword argument repeated", keyword_node) used_keywords[keyword] = None self.check_forbidden_name(keyword, keyword_node) - keyword_value = self.handle_expr(argument.children[2]) + keyword_value = self.handle_expr(argument.get_child(2)) keywords.append(ast.keyword(keyword, keyword_value)) elif argument.type == tokens.STAR: - variable_arg = self.handle_expr(args_node.children[i + 1]) + variable_arg = self.handle_expr(args_node.get_child(i + 1)) i += 1 elif argument.type == tokens.DOUBLESTAR: - keywords_arg = self.handle_expr(args_node.children[i + 1]) + keywords_arg = self.handle_expr(args_node.get_child(i + 1)) i += 1 i += 1 if not args: @@ -1082,20 +1086,20 @@ return self.space.call_function(self.space.w_float, w_num_str) def handle_atom(self, atom_node): - first_child = atom_node.children[0] + first_child = atom_node.get_child(0) first_child_type = first_child.type if first_child_type == tokens.NAME: - return ast.Name(first_child.value, ast.Load, - first_child.lineno, first_child.column) + return ast.Name(first_child.get_value(), ast.Load, + first_child.get_lineno(), first_child.get_column()) elif first_child_type == tokens.STRING: space = self.space encoding = self.compile_info.encoding flags = self.compile_info.flags unicode_literals = flags & consts.CO_FUTURE_UNICODE_LITERALS try: - sub_strings_w = [parsestring.parsestr(space, encoding, s.value, + sub_strings_w = [parsestring.parsestr(space, encoding, atom_node.get_child(i).get_value(), unicode_literals) - for s in atom_node.children] + for i in range(atom_node.num_children())] except error.OperationError, e: if not e.match(space, space.w_UnicodeError): raise @@ -1109,59 +1113,59 @@ final_string = space.call_function(w_join, w_sub_strings) else: final_string = sub_strings_w[0] - return 
ast.Str(final_string, atom_node.lineno, atom_node.column) + return ast.Str(final_string, atom_node.get_lineno(), atom_node.get_column()) elif first_child_type == tokens.NUMBER: - num_value = self.parse_number(first_child.value) - return ast.Num(num_value, atom_node.lineno, atom_node.column) + num_value = self.parse_number(first_child.get_value()) + return ast.Num(num_value, atom_node.get_lineno(), atom_node.get_column()) elif first_child_type == tokens.LPAR: - second_child = atom_node.children[1] + second_child = atom_node.get_child(1) if second_child.type == tokens.RPAR: - return ast.Tuple(None, ast.Load, atom_node.lineno, - atom_node.column) + return ast.Tuple(None, ast.Load, atom_node.get_lineno(), + atom_node.get_column()) elif second_child.type == syms.yield_expr: return self.handle_expr(second_child) return self.handle_testlist_gexp(second_child) elif first_child_type == tokens.LSQB: - second_child = atom_node.children[1] + second_child = atom_node.get_child(1) if second_child.type == tokens.RSQB: - return ast.List(None, ast.Load, atom_node.lineno, - atom_node.column) - if len(second_child.children) == 1 or \ - second_child.children[1].type == tokens.COMMA: + return ast.List(None, ast.Load, atom_node.get_lineno(), + atom_node.get_column()) + if second_child.num_children() == 1 or \ + second_child.get_child(1).type == tokens.COMMA: elts = self.get_expression_list(second_child) - return ast.List(elts, ast.Load, atom_node.lineno, - atom_node.column) + return ast.List(elts, ast.Load, atom_node.get_lineno(), + atom_node.get_column()) return self.handle_listcomp(second_child) elif first_child_type == tokens.LBRACE: - maker = atom_node.children[1] + maker = atom_node.get_child(1) if maker.type == tokens.RBRACE: - return ast.Dict(None, None, atom_node.lineno, atom_node.column) - n_maker_children = len(maker.children) - if n_maker_children == 1 or maker.children[1].type == tokens.COMMA: + return ast.Dict(None, None, atom_node.get_lineno(), atom_node.get_column()) + 
n_maker_children = maker.num_children() + if n_maker_children == 1 or maker.get_child(1).type == tokens.COMMA: elts = [] for i in range(0, n_maker_children, 2): - elts.append(self.handle_expr(maker.children[i])) - return ast.Set(elts, atom_node.lineno, atom_node.column) - if maker.children[1].type == syms.comp_for: + elts.append(self.handle_expr(maker.get_child(i))) + return ast.Set(elts, atom_node.get_lineno(), atom_node.get_column()) + if maker.get_child(1).type == syms.comp_for: return self.handle_setcomp(maker) if (n_maker_children > 3 and - maker.children[3].type == syms.comp_for): + maker.get_child(3).type == syms.comp_for): return self.handle_dictcomp(maker) keys = [] values = [] for i in range(0, n_maker_children, 4): - keys.append(self.handle_expr(maker.children[i])) - values.append(self.handle_expr(maker.children[i + 2])) - return ast.Dict(keys, values, atom_node.lineno, atom_node.column) + keys.append(self.handle_expr(maker.get_child(i))) + values.append(self.handle_expr(maker.get_child(i + 2))) + return ast.Dict(keys, values, atom_node.get_lineno(), atom_node.get_column()) elif first_child_type == tokens.BACKQUOTE: - expr = self.handle_testlist(atom_node.children[1]) - return ast.Repr(expr, atom_node.lineno, atom_node.column) + expr = self.handle_testlist(atom_node.get_child(1)) + return ast.Repr(expr, atom_node.get_lineno(), atom_node.get_column()) else: raise AssertionError("unknown atom") def handle_testlist_gexp(self, gexp_node): - if len(gexp_node.children) > 1 and \ - gexp_node.children[1].type == syms.comp_for: + if gexp_node.num_children() > 1 and \ + gexp_node.get_child(1).type == syms.comp_for: return self.handle_genexp(gexp_node) return self.handle_testlist(gexp_node) @@ -1170,18 +1174,18 @@ current_for = comp_node while True: count += 1 - if len(current_for.children) == 5: - current_iter = current_for.children[4] + if current_for.num_children() == 5: + current_iter = current_for.get_child(4) else: return count while True: - first_child = 
current_iter.children[0] + first_child = current_iter.get_child(0) if first_child.type == for_type: - current_for = current_iter.children[0] + current_for = current_iter.get_child(0) break elif first_child.type == if_type: - if len(first_child.children) == 3: - current_iter = first_child.children[2] + if first_child.num_children() == 3: + current_iter = first_child.get_child(2) else: return count else: @@ -1190,13 +1194,13 @@ def count_comp_ifs(self, iter_node, for_type): count = 0 while True: - first_child = iter_node.children[0] + first_child = iter_node.get_child(0) if first_child.type == for_type: return count count += 1 - if len(first_child.children) == 2: + if first_child.num_children() == 2: return count - iter_node = first_child.children[2] + iter_node = first_child.get_child(2) @specialize.arg(2) def comprehension_helper(self, comp_node, @@ -1208,15 +1212,15 @@ fors_count = self.count_comp_fors(comp_node, for_type, if_type) comps = [] for i in range(fors_count): - for_node = comp_node.children[1] + for_node = comp_node.get_child(1) for_targets = self.handle_exprlist(for_node, ast.Store) - expr = handle_source_expression(comp_node.children[3]) + expr = handle_source_expression(comp_node.get_child(3)) assert isinstance(expr, ast.expr) - if len(for_node.children) == 1: + if for_node.num_children() == 1: comp = ast.comprehension(for_targets[0], expr, None) else: - col = comp_node.column - line = comp_node.lineno + col = comp_node.get_column() + line = comp_node.get_lineno() # Modified in python2.7, see http://bugs.python.org/issue6704 if comp_fix_unamed_tuple_location: expr_node = for_targets[0] @@ -1225,59 +1229,59 @@ line = expr_node.lineno target = ast.Tuple(for_targets, ast.Store, line, col) comp = ast.comprehension(target, expr, None) - if len(comp_node.children) == 5: - comp_node = comp_iter = comp_node.children[4] + if comp_node.num_children() == 5: + comp_node = comp_iter = comp_node.get_child(4) assert comp_iter.type == iter_type ifs_count = 
self.count_comp_ifs(comp_iter, for_type) if ifs_count: ifs = [] for j in range(ifs_count): - comp_node = comp_if = comp_iter.children[0] - ifs.append(self.handle_expr(comp_if.children[1])) - if len(comp_if.children) == 3: - comp_node = comp_iter = comp_if.children[2] + comp_node = comp_if = comp_iter.get_child(0) + ifs.append(self.handle_expr(comp_if.get_child(1))) + if comp_if.num_children() == 3: + comp_node = comp_iter = comp_if.get_child(2) comp.ifs = ifs if comp_node.type == iter_type: - comp_node = comp_node.children[0] + comp_node = comp_node.get_child(0) assert isinstance(comp, ast.comprehension) comps.append(comp) return comps def handle_genexp(self, genexp_node): - elt = self.handle_expr(genexp_node.children[0]) - comps = self.comprehension_helper(genexp_node.children[1], + elt = self.handle_expr(genexp_node.get_child(0)) + comps = self.comprehension_helper(genexp_node.get_child(1), comp_fix_unamed_tuple_location=True) - return ast.GeneratorExp(elt, comps, genexp_node.lineno, - genexp_node.column) + return ast.GeneratorExp(elt, comps, genexp_node.get_lineno(), + genexp_node.get_column()) def handle_listcomp(self, listcomp_node): - elt = self.handle_expr(listcomp_node.children[0]) - comps = self.comprehension_helper(listcomp_node.children[1], + elt = self.handle_expr(listcomp_node.get_child(0)) + comps = self.comprehension_helper(listcomp_node.get_child(1), "handle_testlist", syms.list_for, syms.list_if, syms.list_iter, comp_fix_unamed_tuple_location=True) - return ast.ListComp(elt, comps, listcomp_node.lineno, - listcomp_node.column) + return ast.ListComp(elt, comps, listcomp_node.get_lineno(), + listcomp_node.get_column()) def handle_setcomp(self, set_maker): - elt = self.handle_expr(set_maker.children[0]) - comps = self.comprehension_helper(set_maker.children[1], + elt = self.handle_expr(set_maker.get_child(0)) + comps = self.comprehension_helper(set_maker.get_child(1), comp_fix_unamed_tuple_location=True) - return ast.SetComp(elt, comps, 
set_maker.lineno, set_maker.column) + return ast.SetComp(elt, comps, set_maker.get_lineno(), set_maker.get_column()) def handle_dictcomp(self, dict_maker): - key = self.handle_expr(dict_maker.children[0]) - value = self.handle_expr(dict_maker.children[2]) - comps = self.comprehension_helper(dict_maker.children[3], + key = self.handle_expr(dict_maker.get_child(0)) + value = self.handle_expr(dict_maker.get_child(2)) + comps = self.comprehension_helper(dict_maker.get_child(3), comp_fix_unamed_tuple_location=True) - return ast.DictComp(key, value, comps, dict_maker.lineno, - dict_maker.column) + return ast.DictComp(key, value, comps, dict_maker.get_lineno(), + dict_maker.get_column()) def handle_exprlist(self, exprlist, context): exprs = [] - for i in range(0, len(exprlist.children), 2): - child = exprlist.children[i] + for i in range(0, exprlist.num_children(), 2): + child = exprlist.get_child(i) expr = self.handle_expr(child) self.set_context(expr, context) exprs.append(expr) diff --git a/pypy/interpreter/pyparser/parser.py b/pypy/interpreter/pyparser/parser.py --- a/pypy/interpreter/pyparser/parser.py +++ b/pypy/interpreter/pyparser/parser.py @@ -44,26 +44,125 @@ class Node(object): - __slots__ = "type value children lineno column".split() + __slots__ = ("type", ) - def __init__(self, type, value, children, lineno, column): + def __init__(self, type): self.type = type + + def __eq__(self, other): + raise NotImplementedError("abstract base class") + + def __ne__(self, other): + return not self == other + + def get_value(self): + return None + + def get_child(self, i): + raise NotImplementedError("abstract base class") + + def num_children(self): + return 0 + + def append_child(self, child): + raise NotImplementedError("abstract base class") + + def get_lineno(self): + raise NotImplementedError("abstract base class") + + def get_column(self): + raise NotImplementedError("abstract base class") + + +class Terminal(Node): + __slots__ = ("value", "lineno", "column") + def 
__init__(self, type, value, lineno, column): + Node.__init__(self, type) self.value = value - self.children = children self.lineno = lineno self.column = column + def __repr__(self): + return "Terminal(type=%s, value=%r)" % (self.type, self.value) + def __eq__(self, other): # For tests. - return (self.type == other.type and - self.value == other.value and - self.children == other.children) + return (type(self) == type(other) and + self.type == other.type and + self.value == other.value) + + def get_value(self): + return self.value + + def get_lineno(self): + return self.lineno + + def get_column(self): + return self.column + + +class AbstractNonterminal(Node): + __slots__ = () + + def get_lineno(self): + return self.get_child(0).get_lineno() + + def get_column(self): + return self.get_child(0).get_column() + + def __eq__(self, other): + # For tests. + # grumble, annoying + if not isinstance(other, AbstractNonterminal): + return False + if self.type != other.type: + return False + if self.num_children() != other.num_children(): + return False + for i in range(self.num_children()): + if self.get_child(i) != other.get_child(i): + return False + return True + + +class Nonterminal(AbstractNonterminal): + __slots__ = ("_children", ) + def __init__(self, type, children): + Node.__init__(self, type) + self._children = children def __repr__(self): - if self.value is None: - return "Node(type=%s, children=%r)" % (self.type, self.children) - else: - return "Node(type=%s, value=%r)" % (self.type, self.value) + return "Nonterminal(type=%s, children=%r)" % (self.type, self._children) + + def get_child(self, i): + return self._children[i] + + def num_children(self): + return len(self._children) + + def append_child(self, child): + self._children.append(child) + + +class Nonterminal1(AbstractNonterminal): + __slots__ = ("_child", ) + def __init__(self, type, child): + Node.__init__(self, type) + self._child = child + + def __repr__(self): + return "Nonterminal(type=%s, 
children=[%r])" % (self.type, self._child) + + def get_child(self, i): + assert i == 0 or i == -1 + return self._child + + def num_children(self): + return 1 + + def append_child(self, child): + assert 0, "should be unreachable" + class ParseError(Exception): @@ -97,7 +196,7 @@ if start == -1: start = self.grammar.start self.root = None - current_node = Node(start, None, [], 0, 0) + current_node = Nonterminal(start, []) self.stack = [] self.stack.append((self.grammar.dfas[start - 256], 0, current_node)) @@ -164,14 +263,14 @@ def shift(self, next_state, token_type, value, lineno, column): """Shift a non-terminal and prepare for the next state.""" dfa, state, node = self.stack[-1] - new_node = Node(token_type, value, None, lineno, column) - node.children.append(new_node) + new_node = Terminal(token_type, value, lineno, column) + node.append_child(new_node) self.stack[-1] = (dfa, next_state, node) def push(self, next_dfa, next_state, node_type, lineno, column): """Push a terminal and adjust the current state.""" dfa, state, node = self.stack[-1] - new_node = Node(node_type, None, [], lineno, column) + new_node = Nonterminal(node_type, []) self.stack[-1] = (dfa, next_state, node) self.stack.append((next_dfa, 0, new_node)) @@ -179,6 +278,10 @@ """Pop an entry off the stack and make its node a child of the last.""" dfa, state, node = self.stack.pop() if self.stack: - self.stack[-1][2].children.append(node) + # we are now done with node, so we can store it more efficiently if + # it has just one child + if node.num_children() == 1: + node = Nonterminal1(node.type, node.get_child(0)) + self.stack[-1][2].append_child(node) else: self.root = node diff --git a/pypy/interpreter/pyparser/pygram.py b/pypy/interpreter/pyparser/pygram.py --- a/pypy/interpreter/pyparser/pygram.py +++ b/pypy/interpreter/pyparser/pygram.py @@ -31,8 +31,11 @@ class _Symbols(object): pass +rev_lookup = {} for sym_name, idx in python_grammar.symbol_ids.iteritems(): setattr(_Symbols, sym_name, idx) + 
rev_lookup[idx] = sym_name syms = _Symbols() +syms._rev_lookup = rev_lookup # for debugging del _get_python_grammar, _Tokens, tok_name, sym_name, idx diff --git a/pypy/interpreter/pyparser/test/test_parser.py b/pypy/interpreter/pyparser/test/test_parser.py --- a/pypy/interpreter/pyparser/test/test_parser.py +++ b/pypy/interpreter/pyparser/test/test_parser.py @@ -52,24 +52,23 @@ value = "\n" else: value = "" - children = None + n = parser.Terminal(tp, value, 0, 0) else: tp = gram.symbol_ids[data[0]] - value = None children = [] - n = parser.Node(tp, value, children, 0, 0) + n = parser.Nonterminal(tp, children) new_indent = count_indent(line) if new_indent >= last_indent: if new_indent == last_indent and node_stack: node_stack.pop() if node_stack: - node_stack[-1].children.append(n) + node_stack[-1].append_child(n) node_stack.append(n) else: diff = last_indent - new_indent pop_nodes = diff // 4 + 1 del node_stack[-pop_nodes:] - node_stack[-1].children.append(n) + node_stack[-1].append_child(n) node_stack.append(n) last_indent = new_indent return node_stack[0] diff --git a/pypy/module/_cffi_backend/lib_obj.py b/pypy/module/_cffi_backend/lib_obj.py --- a/pypy/module/_cffi_backend/lib_obj.py +++ b/pypy/module/_cffi_backend/lib_obj.py @@ -64,7 +64,8 @@ # ptr = rffi.cast(rffi.CCHARP, g.c_address) assert ptr - return W_FunctionWrapper(self.space, ptr, g.c_size_or_direct_fn, + return W_FunctionWrapper(self.space, self.ffi, + ptr, g.c_size_or_direct_fn, rawfunctype, fnname, self.libname) @jit.elidable_promote() diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -238,7 +238,7 @@ self.nostruct_nargs = len(ctfuncptr.fargs) - (locs is not None and locs[0] == 'R') - def unexpected_fn_type(self, ffi): + def repr_fn_type(self, ffi, repl=""): fargs, fret, ellipsis, abi = self._unpack(ffi) argnames = [farg.name for farg in fargs] 
if ellipsis: @@ -246,9 +246,14 @@ sargs = ', '.join(argnames) sret1 = fret.name[:fret.name_position] sret2 = fret.name[fret.name_position:] + if len(repl) > 0 and not sret1.endswith('*'): + repl = " " + repl + return '%s%s(%s)%s' % (sret1, repl, sargs, sret2) + + def unexpected_fn_type(self, ffi): raise oefmt(ffi.w_FFIError, - "the type '%s(%s)%s' is a function type, not a " - "pointer-to-function type", sret1, sargs, sret2) + "the type '%s' is a function type, not a " + "pointer-to-function type", self.repr_fn_type(ffi)) def realize_c_type(ffi, opcodes, index): diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -420,9 +420,11 @@ def test_math_sin_type(self): ffi, lib = self.prepare( - "double sin(double);", + "double sin(double); void *xxtestfunc();", 'test_math_sin_type', - '#include ') + """#include + void *xxtestfunc(void) { return 0; } + """) # 'lib.sin' is typed as a object on lib assert ffi.typeof(lib.sin).cname == "double(*)(double)" # 'x' is another object on lib, made very indirectly @@ -432,7 +434,16 @@ # present on built-in functions on CPython; must be emulated on PyPy: assert lib.sin.__name__ == 'sin' assert lib.sin.__module__ == '_CFFI_test_math_sin_type' - assert lib.sin.__doc__=='direct call to the C function of the same name' + assert lib.sin.__doc__ == ( + "double sin(double);\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") + + assert ffi.typeof(lib.xxtestfunc).cname == "void *(*)()" + assert lib.xxtestfunc.__doc__ == ( + "void *xxtestfunc();\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") def test_verify_anonymous_struct_with_typedef(self): ffi, lib = self.prepare( diff --git a/pypy/module/_cffi_backend/wrapper.py b/pypy/module/_cffi_backend/wrapper.py --- a/pypy/module/_cffi_backend/wrapper.py +++ b/pypy/module/_cffi_backend/wrapper.py 
@@ -1,6 +1,7 @@ from pypy.interpreter.error import oefmt from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, interp_attrproperty +from pypy.interpreter.typedef import GetSetProperty from pypy.interpreter.gateway import interp2app from rpython.rlib import jit @@ -24,9 +25,8 @@ This class cannot be used for variadic functions. """ _immutable_ = True - common_doc_str = 'direct call to the C function of the same name' - def __init__(self, space, fnptr, directfnptr, + def __init__(self, space, ffi, fnptr, directfnptr, rawfunctype, fnname, modulename): # everything related to the type of the function is accessed # as immutable attributes of the 'rawfunctype' object, which @@ -39,6 +39,7 @@ assert locs is None or len(ctype.fargs) == len(locs) # self.space = space + self.ffi = ffi self.fnptr = fnptr self.directfnptr = directfnptr self.rawfunctype = rawfunctype @@ -93,6 +94,11 @@ def descr_repr(self, space): return space.wrap("" % (self.fnname,)) + def descr_get_doc(self, space): + doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname) + doc = '%s;\n\nCFFI C function from %s.lib' % (doc, self.modulename) + return space.wrap(doc) + @jit.unroll_safe def prepare_args(space, rawfunctype, args_w, start_index): @@ -128,6 +134,6 @@ __call__ = interp2app(W_FunctionWrapper.descr_call), __name__ = interp_attrproperty('fnname', cls=W_FunctionWrapper), __module__ = interp_attrproperty('modulename', cls=W_FunctionWrapper), - __doc__ = interp_attrproperty('common_doc_str', cls=W_FunctionWrapper), + __doc__ = GetSetProperty(W_FunctionWrapper.descr_get_doc), ) W_FunctionWrapper.typedef.acceptable_as_base_class = False diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -53,7 +53,8 @@ def PyEval_ThreadsInitialized(space): if not space.config.translation.thread: return 0 - return 1 + from pypy.module.thread import os_thread + return 
int(os_thread.threads_initialized(space)) # XXX: might be generally useful def encapsulator(T, flavor='raw', dealloc=None): diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -136,7 +136,21 @@ return PyLong_FromLong(3); """), ]) + res = module.bounce() + assert res == 3 + def test_threadsinitialized(self): + module = self.import_extension('foo', [ + ("test", "METH_NOARGS", + """ + return PyInt_FromLong(PyEval_ThreadsInitialized()); + """), + ]) + res = module.test() + print "got", res + assert res in (0, 1) + + class AppTestState(AppTestCpythonExtensionBase): def test_frame_tstate_tracing(self): diff --git a/pypy/module/parser/pyparser.py b/pypy/module/parser/pyparser.py --- a/pypy/module/parser/pyparser.py +++ b/pypy/module/parser/pyparser.py @@ -15,21 +15,21 @@ @specialize.arg(3) def _build_app_tree(self, space, node, seq_maker, with_lineno, with_column): - if node.children is not None: - seq_w = [None]*(len(node.children) + 1) + if node.num_children(): + seq_w = [None]*(node.num_children() + 1) seq_w[0] = space.wrap(node.type) - for i in range(1, len(node.children) + 1): - seq_w[i] = self._build_app_tree(space, node.children[i - 1], + for i in range(1, node.num_children() + 1): + seq_w[i] = self._build_app_tree(space, node.get_child(i - 1), seq_maker, with_lineno, with_column) else: seq_w = [None]*(2 + with_lineno + with_column) seq_w[0] = space.wrap(node.type) - seq_w[1] = space.wrap(node.value) + seq_w[1] = space.wrap(node.get_value()) if with_lineno: - seq_w[2] = space.wrap(node.lineno) + seq_w[2] = space.wrap(node.get_lineno()) if with_column: - seq_w[3] = space.wrap(node.column) + seq_w[3] = space.wrap(node.get_column()) return seq_maker(seq_w) def descr_issuite(self, space): diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- 
a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -417,8 +417,11 @@ def test_math_sin_type(): ffi = FFI() - ffi.cdef("double sin(double);") - lib = verify(ffi, 'test_math_sin_type', '#include ') + ffi.cdef("double sin(double); void *xxtestfunc();") + lib = verify(ffi, 'test_math_sin_type', """ + #include + void *xxtestfunc(void) { return 0; } + """) # 'lib.sin' is typed as a object on lib assert ffi.typeof(lib.sin).cname == "double(*)(double)" # 'x' is another object on lib, made very indirectly @@ -428,7 +431,16 @@ # present on built-in functions on CPython; must be emulated on PyPy: assert lib.sin.__name__ == 'sin' assert lib.sin.__module__ == '_CFFI_test_math_sin_type' - assert lib.sin.__doc__ == 'direct call to the C function of the same name' + assert lib.sin.__doc__ == ( + "double sin(double);\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") + + assert ffi.typeof(lib.xxtestfunc).cname == "void *(*)()" + assert lib.xxtestfunc.__doc__ == ( + "void *xxtestfunc();\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") def test_verify_anonymous_struct_with_typedef(): ffi = FFI() diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -62,6 +62,12 @@ raises(IndexError, d.remove, 'c') assert len(d) == 0 + def test_deque_unhashable(self): + from collections import Hashable + d = self.get_deque() + raises(TypeError, hash, d) + assert not isinstance(d, Hashable) + class AppTestDequeExtra: From pypy.commits at gmail.com Tue Apr 19 09:23:52 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 19 Apr 2016 06:23:52 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: fix merge Message-ID: <57163168.10981c0a.92fd1.6b2e@mx.google.com> Author: mattip Branch: release-5.x Changeset: r83772:3260adbeba4a Date: 
2016-04-19 16:22 +0300 http://bitbucket.org/pypy/pypy/changeset/3260adbeba4a/ Log: fix merge diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -30,7 +30,7 @@ /* PyPy version as a string */ #define PYPY_VERSION "5.1.0" -#define PYPY_VERSION_NUM 0x05020000 +#define PYPY_VERSION_NUM 0x05010000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object From pypy.commits at gmail.com Tue Apr 19 09:27:59 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 19 Apr 2016 06:27:59 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: refactoring simplifies the logging of a trace. now (just for the purpose of the jitlog) there is a unique id for each traces which makes parsing also easier Message-ID: <5716325f.519d1c0a.6dfa.6872@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83773:37892687f8b9 Date: 2016-04-19 15:27 +0200 http://bitbucket.org/pypy/pypy/changeset/37892687f8b9/ Log: refactoring simplifies the logging of a trace. 
now (just for the purpose of the jitlog) there is a unique id for each traces which makes parsing also easier diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -544,7 +544,7 @@ looptoken._ll_function_addr = rawstart + functionpos if logger: log = logger.log_trace(MARK_TRACE_ASM, None, self.mc) - log.write(inputargs, operations, None, ops_offset=ops_offset, unique_id=rawstart) + log.write(inputargs, operations, ops_offset=ops_offset) self.fixup_target_tokens(rawstart) self.teardown() @@ -611,7 +611,7 @@ frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) if logger: log = logger.log_trace(MARK_TRACE_ASM, None, self.mc) - log.write(inputargs, operations, faildescr, ops_offset, unique_id=rawstart) + log.write(inputargs, operations, ops_offset) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) self.teardown() diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -478,10 +478,7 @@ def do_compile_loop(jd_id, unique_id, metainterp_sd, inputargs, operations, looptoken, log=True, name='', memo=None): _log = metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, metainterp_sd, None) - _log.write(inputargs, operations, None, name=name, unique_id=unique_id) - # TODO remove old - metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, - 'compiling', None, name, memo) + _log.write(inputargs, operations) return metainterp_sd.cpu.compile_loop(inputargs, operations, looptoken, jd_id=jd_id, unique_id=unique_id, @@ -491,10 +488,7 @@ def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, original_loop_token, log=True, memo=None): _log = metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, metainterp_sd, None) - _log.write(inputargs, operations, faildescr) - # TODO remove old - metainterp_sd.logger_ops.log_bridge(inputargs, 
operations, "compiling", - memo=memo) + _log.write(inputargs, operations) assert isinstance(faildescr, AbstractFailDescr) return metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations, original_loop_token, log=log, @@ -510,6 +504,7 @@ def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type, orig_inpargs, memo): + metainterp_sd.jitlog.start_new_trace(None, type == "entry bridge") forget_optimization_info(loop.operations) forget_optimization_info(loop.inputargs) vinfo = jitdriver_sd.virtualizable_info @@ -571,6 +566,7 @@ def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs, operations, original_loop_token, memo): + metainterp_sd.jitlog.start_new_trace(faildescr) forget_optimization_info(operations) forget_optimization_info(inputargs) if not we_are_translated(): diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -31,6 +31,7 @@ MARK_STITCH_BRIDGE = 0x19 MARK_JITLOG_COUNTER = 0x20 +MARK_START_TRACE = 0x21 MARK_JITLOG_HEADER = 0x23 MARK_JITLOG_DEBUG_MERGE_POINT = 0x24 @@ -84,6 +85,7 @@ def __init__(self): self.cintf = cintf.setup() self.memo = {} + self.trace_id = 0 def setup_once(self): if self.cintf.jitlog_enabled(): @@ -97,6 +99,20 @@ def finish(self): self.cintf.jitlog_teardown() + def start_new_trace(self, faildescr=None, entry_bridge=False): + if not self.cintf.jitlog_enabled(): + return + content = [encode_le_addr(self.trace_id)] + if faildescr: + content.append(encode_str('bridge')) + descrnmr = compute_unique_id(faildescr) + content.append(encode_le_addr(descrnmr)) + else: + content.append(encode_str('loop')) + content.append(encode_le_addr(int(entry_bridge))) + self.cintf._write_marked(MARK_START_TRACE, ''.join(content)) + self.trace_id += 1 + def _write_marked(self, mark, line): if not we_are_translated(): assert self.cintf.jitlog_enabled() @@ -127,7 +143,10 @@ 
self._write_marked(MARK_STITCH_BRIDGE, ''.join(lst)) class BaseLogTrace(object): - def write(self, args, ops, faildescr=None, ops_offset={}, name=None, unique_id=0): + def write_trace(self, trace): + return None + + def write(self, args, ops, ops_offset={}): return None EMPTY_TRACE_LOG = BaseLogTrace() @@ -143,25 +162,16 @@ self.mc = mc self.logger = logger - def write(self, args, ops, faildescr=None, ops_offset={}, - name=None, unique_id=0): + def write_trace(self, trace): + ops = [] + i = trace.get_iter() + while not i.done(): + ops.append(i.next()) + self.write(i.inputargs, ops) + + def write(self, args, ops, faildescr=None, ops_offset={}): log = self.logger - - if name is None: - name = '' - # write the initial tag - if faildescr is None: - string = encode_str('loop') + \ - encode_le_addr(unique_id) + \ - encode_str(name or '') - log._write_marked(self.tag, string) - else: - descr_number = compute_unique_id(faildescr) - string = encode_str('bridge') + \ - encode_le_addr(descr_number) + \ - encode_le_addr(unique_id) + \ - encode_str(name or '') - log._write_marked(self.tag, string) + log._write_marked(self.tag, encode_le_addr(self.trace_id)) # input args str_args = [self.var_to_str(arg) for arg in args] @@ -191,11 +201,11 @@ jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] filename, lineno, enclosed, index, opname = jd_sd.warmstate.get_location(op.getarglist()[3:]) line = [] - line.append(encode_str(filename)) + line.append(encode_str(filename or "")) line.append(encode_le_16bit(lineno)) - line.append(encode_str(enclosed)) + line.append(encode_str(enclosed or "")) line.append(encode_le_64bit(index)) - line.append(encode_str(opname)) + line.append(encode_str(opname or "")) log._write_marked(MARK_JITLOG_DEBUG_MERGE_POINT, ''.join(line)) diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py 
@@ -54,9 +54,9 @@ """ debug_start("jit-optimize") try: - # TODO missing the unique id - # TODO log = metainterp_sd.jitlog.log_trace(MARK_TRACE, metainterp_sd, None) - # TODO log.write(inputargs, compile_data.operations) + # mark that a new trace has been started + log = metainterp_sd.jitlog.log_trace(MARK_TRACE, metainterp_sd, None) + log.write_trace(compile_data.trace) if compile_data.log_noopt: metainterp_sd.logger_noopt.log_loop_from_trace(compile_data.trace, memo=memo) if memo is None: diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -564,6 +564,7 @@ def make_driverhook_graphs(self): s_Str = annmodel.SomeString() + s_Str_None = annmodel.SomeString(can_be_None=True) s_Int = annmodel.SomeInteger() # annhelper = MixLevelHelperAnnotator(self.translator.rtyper) @@ -580,7 +581,7 @@ jd._should_unroll_one_iteration_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.should_unroll_one_iteration, annmodel.s_Bool) - s_Tuple = annmodel.SomeTuple([s_Str, s_Int, s_Str, s_Int, s_Str]) + s_Tuple = annmodel.SomeTuple([s_Str_None, s_Int, s_Str_None, s_Int, s_Str_None]) jd._get_location_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.get_location, s_Tuple) annhelper.finish() From pypy.commits at gmail.com Tue Apr 19 12:39:34 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 19 Apr 2016 09:39:34 -0700 (PDT) Subject: [pypy-commit] pypy py3k: winreg must be a builtin module on Windows Message-ID: <57165f46.8a37c20a.a6f42.70d0@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83774:4d87b3d82dc2 Date: 2016-04-19 17:38 +0100 http://bitbucket.org/pypy/pypy/changeset/4d87b3d82dc2/ Log: winreg must be a builtin module on Windows fixes issue #2276 diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -398,7 +398,7 @@ 
self.check_signal_action = None # changed by the signal module self.user_del_action = UserDelAction(self) self._code_of_sys_exc_info = None - + # can be overridden to a subclass self.initialize() @@ -582,6 +582,9 @@ # lives in pypy/module/exceptions, we rename it below for # sys.builtin_module_names bootstrap_modules = set(('sys', 'imp', 'builtins', 'exceptions')) + if sys.platform.startswith("win"): + self.setbuiltinmodule('_winreg') + bootstrap_modules.add('winreg') installed_builtin_modules = list(bootstrap_modules) exception_types_w = self.export_builtin_exceptions() @@ -1534,7 +1537,7 @@ """ if w_obj is unicode, call identifier_w() (i.e., return the UTF-8 encoded string). Else, call bytes_w(). - + Maybe we should kill str_w completely and manually substitute it with identifier_w/bytes_w at all call sites? """ From pypy.commits at gmail.com Tue Apr 19 15:23:08 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 19 Apr 2016 12:23:08 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: hg merge py3k Message-ID: <5716859c.22c8c20a.f6d2d.ffffad2b@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83775:16fdc05dd0b4 Date: 2016-04-19 20:12 +0100 http://bitbucket.org/pypy/pypy/changeset/16fdc05dd0b4/ Log: hg merge py3k diff too long, truncating to 2000 out of 8726 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -111,23 +111,24 @@ Simon Burton Martin Matusiak Konstantin Lopuhin + Stefano Rivera Wenzhu Man John Witulski Laurence Tratt Ivan Sichmann Freitas Greg Price Dario Bertini - Stefano Rivera Mark Pearse Simon Cross + Edd Barrett Andreas Stührk - Edd Barrett Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov + Spenser Bauman Jeremy Thurgood Paweł Piotr Przeradowski - Spenser Bauman + Tobias Pape Paul deGrandis Ilya Osadchiy marky1991 @@ -139,7 +140,7 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Tobias Pape + Mark Young Wanja Saatkamp Gerald Klix Mike Blume @@ -170,9 +171,9 @@ Yichao Yu Rocco Moretti Gintautas Miliauskas + Devin Jeanpierre Michael Twomey Lucian Branescu Mihaila - Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -183,6 +184,7 @@ Victor Stinner Andrews Medina anatoly techtonik + Sergey Matyunin Stuart Williams Jasper Schulz Christian Hudon @@ -217,7 +219,6 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan - Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -225,7 +226,9 @@ Pieter Zieschang Gabriel Lukas Vacek + Kunal Grover Andrew Dalke + Florin Papa Sylvain Thenault Jakub Stasiak Nathan Taylor @@ -240,7 +243,6 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner - Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -252,9 +254,11 @@ Artur Lisiecki Sergey Kishchenko Ignas Mikalajunas + Alecsandru Patrascu Christoph Gerum Martin Blais Lene Wagner + Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su Toni Mattis @@ -291,6 +295,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann + florinpapa Rafał Gałczyński Matt Bogosian Christian Muirhead @@ -305,6 +310,7 @@ Boglarka Vezer Chris Pressey Buck Golemon + Diana Popa Konrad Delong Dinu Gherman Chris Lambacher diff --git a/lib-python/3/test/test_hash.py b/lib-python/3/test/test_hash.py --- a/lib-python/3/test/test_hash.py +++ b/lib-python/3/test/test_hash.py @@ -8,6 +8,7 @@ import sys import unittest from test.script_helper import assert_python_ok +from test.support import impl_detail, check_impl_detail from collections import Hashable IS_64BIT = sys.maxsize > 2**32 @@ -140,6 +141,7 @@ def get_hash_command(self, repr_): return 'print(hash(eval(%a)))' % repr_ + @impl_detail("PyPy does not support hash randomization", pypy=False) def 
get_hash(self, repr_, seed=None): env = os.environ.copy() env['__cleanenv'] = True # signal to assert_python not to do a copy @@ -161,6 +163,11 @@ self.assertNotEqual(run1, run2) class StringlikeHashRandomizationTests(HashRandomizationTests): + if check_impl_detail(pypy=True): + EMPTY_STRING_HASH = -1 + else: + EMPTY_STRING_HASH = 0 + def test_null_hash(self): # PYTHONHASHSEED=0 disables the randomized hash if IS_64BIT: @@ -194,21 +201,21 @@ repr_ = repr('abc') def test_empty_string(self): - self.assertEqual(hash(""), 0) + self.assertEqual(hash(""), self.EMPTY_STRING_HASH) class BytesHashRandomizationTests(StringlikeHashRandomizationTests, unittest.TestCase): repr_ = repr(b'abc') def test_empty_string(self): - self.assertEqual(hash(b""), 0) + self.assertEqual(hash(b""), self.EMPTY_STRING_HASH) class MemoryviewHashRandomizationTests(StringlikeHashRandomizationTests, unittest.TestCase): repr_ = "memoryview(b'abc')" def test_empty_string(self): - self.assertEqual(hash(memoryview(b"")), 0) + self.assertEqual(hash(memoryview(b"")), self.EMPTY_STRING_HASH) class DatetimeTests(HashRandomizationTests): def get_hash_command(self, repr_): diff --git a/lib-python/3/test/test_itertools.py b/lib-python/3/test/test_itertools.py --- a/lib-python/3/test/test_itertools.py +++ b/lib-python/3/test/test_itertools.py @@ -1728,6 +1728,7 @@ class LengthTransparency(unittest.TestCase): + @support.impl_detail("__length_hint__() API is undocumented") def test_repeat(self): from test.test_iterlen import len self.assertEqual(len(repeat(None, 50)), 50) diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.5.2 +Version: 1.6.0 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.5.2" -__version_info__ = (1, 5, 2) +__version__ = "1.6.0" +__version_info__ = (1, 6, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.5.2" + "\ncompiled with cffi version: 1.6.0" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -299,6 +299,23 @@ """ return self._backend.string(cdata, maxlen) + def unpack(self, cdata, length): + """Unpack an array of C data of the given length, + returning a Python string/unicode/list. + + If 'cdata' is a pointer to 'char', returns a byte string. + It does not stop at the first null. This is equivalent to: + ffi.buffer(cdata, length)[:] + + If 'cdata' is a pointer to 'wchar_t', returns a unicode string. + 'length' is measured in wchar_t's; it is not the size in bytes. + + If 'cdata' is a pointer to anything else, returns a list of + 'length' items. This is a faster equivalent to: + [cdata[i] for i in range(length)] + """ + return self._backend.unpack(cdata, length) + def buffer(self, cdata, size=-1): """Return a read-write buffer object that references the raw C data pointed to by the given 'cdata'. 
The 'cdata' must be a pointer or @@ -721,6 +738,26 @@ raise ValueError("ffi.def_extern() is only available on API-mode FFI " "objects") + def list_types(self): + """Returns the user type names known to this FFI instance. + This returns a tuple containing three lists of names: + (typedef_names, names_of_structs, names_of_unions) + """ + typedefs = [] + structs = [] + unions = [] + for key in self._parser._declarations: + if key.startswith('typedef '): + typedefs.append(key[8:]) + elif key.startswith('struct '): + structs.append(key[7:]) + elif key.startswith('union '): + unions.append(key[6:]) + typedefs.sort() + structs.sort() + unions.sort() + return (typedefs, structs, unions) + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -1231,7 +1231,7 @@ if c == '\n': return '\\n' return '\\%03o' % ord(c) lines = [] - for line in s.splitlines(True): + for line in s.splitlines(True) or ['']: lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) return ' \\\n'.join(lines) @@ -1319,7 +1319,9 @@ s = s.encode('ascii') super(NativeIO, self).write(s) -def _make_c_or_py_source(ffi, module_name, preamble, target_file): +def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): + if verbose: + print("generating %s" % (target_file,)) recompiler = Recompiler(ffi, module_name, target_is_python=(preamble is None)) recompiler.collect_type_table() @@ -1331,6 +1333,8 @@ with open(target_file, 'r') as f1: if f1.read(len(output) + 1) != output: raise IOError + if verbose: + print("(already up-to-date)") return False # already up-to-date except IOError: tmp_file = '%s.~%d' % (target_file, os.getpid()) @@ -1343,12 +1347,14 @@ os.rename(tmp_file, target_file) return True -def make_c_source(ffi, module_name, preamble, target_c_file): +def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): assert 
preamble is not None - return _make_c_or_py_source(ffi, module_name, preamble, target_c_file) + return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, + verbose) -def make_py_source(ffi, module_name, target_py_file): - return _make_c_or_py_source(ffi, module_name, None, target_py_file) +def make_py_source(ffi, module_name, target_py_file, verbose=False): + return _make_c_or_py_source(ffi, module_name, None, target_py_file, + verbose) def _modname_to_file(outputdir, modname, extension): parts = modname.split('.') @@ -1438,7 +1444,8 @@ target = '*' # ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) - updated = make_c_source(ffi, module_name, preamble, c_file) + updated = make_c_source(ffi, module_name, preamble, c_file, + verbose=compiler_verbose) if call_c_compiler: patchlist = [] cwd = os.getcwd() @@ -1458,7 +1465,8 @@ else: if c_file is None: c_file, _ = _modname_to_file(tmpdir, module_name, '.py') - updated = make_py_source(ffi, module_name, c_file) + updated = make_py_source(ffi, module_name, c_file, + verbose=compiler_verbose) if call_c_compiler: return c_file else: @@ -1484,4 +1492,7 @@ def typeof_disabled(*args, **kwds): raise NotImplementedError ffi._typeof = typeof_disabled + for name in dir(ffi): + if not name.startswith('_') and not hasattr(module.ffi, name): + setattr(ffi, name, NotImplemented) return module.lib diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -81,13 +81,13 @@ Simon Burton Martin Matusiak Konstantin Lopuhin + Stefano Rivera Wenzhu Man John Witulski Laurence Tratt Ivan Sichmann Freitas Greg Price Dario Bertini - Stefano Rivera Mark Pearse Simon Cross Andreas Stührk @@ -95,9 +95,10 @@ Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov + Spenser Bauman Jeremy Thurgood Paweł Piotr Przeradowski - Spenser Bauman + Tobias Pape Paul deGrandis Ilya Osadchiy marky1991 @@ -109,7 +110,7 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Tobias Pape + Mark Young Wanja Saatkamp Gerald Klix Mike Blume @@ -140,9 +141,9 @@ Yichao Yu Rocco Moretti Gintautas Miliauskas + Devin Jeanpierre Michael Twomey Lucian Branescu Mihaila - Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -153,6 +154,7 @@ Victor Stinner Andrews Medina anatoly techtonik + Sergey Matyunin Stuart Williams Jasper Schulz Christian Hudon @@ -187,7 +189,6 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan - Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -195,7 +196,9 @@ Pieter Zieschang Gabriel Lukas Vacek + Kunal Grover Andrew Dalke + Florin Papa Sylvain Thenault Jakub Stasiak Nathan Taylor @@ -210,7 +213,6 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner - Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -222,9 +224,11 @@ Artur Lisiecki Sergey Kishchenko Ignas Mikalajunas + Alecsandru Patrascu Christoph Gerum Martin Blais Lene Wagner + Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su Toni Mattis @@ -261,6 +265,7 @@ Akira Li Gustavo Niemeyer Stephan Busemann + florinpapa Rafał Gałczyński Matt Bogosian Christian Muirhead @@ -275,6 +280,7 @@ Boglarka Vezer Chris Pressey Buck Golemon + Diana Popa Konrad Delong Dinu Gherman Chris Lambacher diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.1.0.rst release-5.0.1.rst release-5.0.0.rst release-4.0.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. 
toctree:: whatsnew-head.rst + whatsnew-5.1.0.rst whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.1.0.rst @@ -0,0 +1,136 @@ +======== +PyPy 5.1 +======== + +We have released PyPy 5.1, about a month after PyPy 5.0. +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata, and we now fully support the IBM s390x +architecture. + +You can download the PyPy 5.1 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. 
+ +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s960x** running Linux + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 5.0 released in March 2015) +========================================================= + +* New features: + + * A new jit backend for the IBM s390x, which was a large effort over the past + few months. + + * Add better support for PyUnicodeObject in the C-API compatibility layer + + * Support GNU/kFreeBSD Debian ports in vmprof + + * Add __pypy__._promote + + * Make attrgetter a single type for CPython compatibility + +* Bug Fixes + + * Catch exceptions raised in an exit function + + * Fix a corner case in the JIT + + * Fix edge cases in the cpyext refcounting-compatible semantics + + * Try harder to not emit NEON instructions on ARM processors without NEON + support + + * Improve the rpython posix module system interaction function calls + + * Detect a missing class function implementation instead of calling a random + function + + * Check that PyTupleObjects do not contain any NULLs at the + point of conversion to W_TupleObjects + + * In ctypes, fix _anonymous_ fields of instances + + * Fix JIT issue with unpack() on a Trace which contains half-written operations + + * Fix sandbox startup (a regression in 5.0) + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * Implemented numpy.where for a single argument + + * Indexing by a numpy scalar now returns a scalar + + * Fix transpose(arg) when arg is a sequence + + * Refactor include file handling, now all numpy ndarray, ufunc, and umath + functions 
exported from libpypy.so are declared in pypy_numpy.h, which is + included only when building our fork of numpy + +* Performance improvements: + + * Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting + + * Merge another round of improvements to the warmup performance + + * Cleanup history rewriting in pyjitpl + + * Remove the forced minor collection that occurs when rewriting the + assembler at the start of the JIT backend + +* Internal refactorings: + + * Use a simpler logger to speed up translation + + * Drop vestiges of Python 2.5 support in testing + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -0,0 +1,62 @@ +========================= +What's new in PyPy 5.1 +========================= + +.. this is a revision shortly after release-5.0 +.. startrev: b238b48f9138 + +.. branch: s390x-backend + +The jit compiler backend implementation for the s390x architecutre. +The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. +It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. + +.. branch: remove-py-log + +Replace py.log with something simpler, which should speed up logging + +.. branch: where_1_arg + +Implemented numpy.where for 1 argument (thanks sergem) + +.. branch: fix_indexing_by_numpy_int + +Implement yet another strange numpy indexing compatibility; indexing by a scalar +returns a scalar + +.. branch: fix_transpose_for_list_v3 + +Allow arguments to transpose to be sequences + +.. 
branch: jit-leaner-frontend + +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation +of traces + +.. branch: win32-lib-name + +.. branch: remove-frame-forcing-in-executioncontext + +.. branch: rposix-for-3 + +Wrap more POSIX functions in `rpython.rlib.rposix`. + +.. branch: cleanup-history-rewriting + +A local clean-up in the JIT front-end. + +.. branch: jit-constptr-2 + +Remove the forced minor collection that occurs when rewriting the +assembler at the start of the JIT backend. This is done by emitting +the ConstPtrs in a separate table, and loading from the table. It +gives improved warm-up time and memory usage, and also removes +annoying special-purpose code for pinned pointers. + +.. branch: fix-jitlog + +.. branch: cleanup-includes + +Remove old uneeded numpy headers, what is left is only for testing. Also +generate pypy_numpy.h which exposes functions to directly use micronumpy +ndarray and ufuncs diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,42 +1,16 @@ ========================= -What's new in PyPy 5.0.+ +What's new in PyPy 5.1+ ========================= -.. this is a revision shortly after release-5.0 -.. startrev: b238b48f9138 - -.. branch: s390x-backend - -The jit compiler backend implementation for the s390x architecutre. -The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. -It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. - -.. branch: remove-py-log - -Replace py.log with something simpler, which should speed up logging - -.. branch: where_1_arg - -Implemented numpy.where for 1 argument (thanks sergem) - -.. branch: fix_indexing_by_numpy_int - -Implement yet another strange numpy indexing compatibility; indexing by a scalar -returns a scalar - -.. 
branch: fix_transpose_for_list_v3 - -Allow arguments to transpose to be sequences - -.. branch: jit-leaner-frontend - -Improve the tracing speed in the frontend as well as heapcache by using a more compact representation -of traces - -.. branch: win32-lib-name - -.. branch: remove-frame-forcing-in-executioncontext +.. this is a revision shortly after release-5.1 +.. startrev: 2180e1eaf6f6 .. branch: rposix-for-3 -Wrap more POSIX functions in `rpython.rlib.rposix`. +Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). +This updates the underlying rpython functions with the ones needed for the +py3k branch + +.. branch: numpy_broadcast + +Add broadcast to micronumpy diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -583,6 +583,9 @@ # lives in pypy/module/exceptions, we rename it below for # sys.builtin_module_names bootstrap_modules = set(('sys', 'imp', 'builtins', 'exceptions')) + if sys.platform.startswith("win"): + self.setbuiltinmodule('_winreg') + bootstrap_modules.add('winreg') installed_builtin_modules = list(bootstrap_modules) exception_types_w = self.export_builtin_exceptions() @@ -1052,6 +1055,9 @@ def newlist_int(self, list_i): return self.newlist([self.wrap(i) for i in list_i]) + def newlist_float(self, list_f): + return self.newlist([self.wrap(f) for f in list_f]) + def newlist_hint(self, sizehint): from pypy.objspace.std.listobject import make_empty_list_with_size return make_empty_list_with_size(self, sizehint) @@ -1509,7 +1515,7 @@ assert False # XXX rename/replace with code more like CPython getargs for buffers - def bufferstr_w(self, w_obj): + def bufferstr_w(self, w_obj, flags=BUF_SIMPLE): # Directly returns an interp-level str. 
Note that if w_obj is a # unicode string, this is different from str_w(buffer(w_obj)): # indeed, the latter returns a string with the raw bytes from @@ -1523,13 +1529,7 @@ except OperationError, e: if not e.match(self, self.w_TypeError): raise - try: - buf = w_obj.buffer_w(self, 0) - except BufferInterfaceNotFound: - raise oefmt(self.w_TypeError, - "'%T' does not support the buffer interface", w_obj) - else: - return buf.as_str() + return self.buffer_w(w_obj, flags).as_str() def str_or_None_w(self, w_obj): return None if self.is_none(w_obj) else self.str_w(w_obj) diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -5,9 +5,9 @@ # ____________________________________________________________ -def sorted(lst, key=None, reverse=False): +def sorted(iterable, key=None, reverse=False): "sorted(iterable, key=None, reverse=False) --> new sorted list" - sorted_lst = list(lst) + sorted_lst = list(iterable) sorted_lst.sort(key=key, reverse=reverse) return sorted_lst diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.5.2" +VERSION = "1.6.0" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: @@ -48,6 +48,7 @@ 'from_buffer': 'func.from_buffer', 'string': 'func.string', + 'unpack': 'func.unpack', 'buffer': 'cbuffer.buffer', 'memmove': 'func.memmove', diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -323,14 +323,18 @@ from pypy.module._cffi_backend import ctypearray ctype = self.ctype if isinstance(ctype, ctypearray.W_CTypeArray): - return 
ctype.ctitem.unpack_list_of_int_items(self) + length = self.get_array_length() + with self as ptr: + return ctype.ctitem.unpack_list_of_int_items(ptr, length) return None def unpackiterable_float(self, space): from pypy.module._cffi_backend import ctypearray ctype = self.ctype if isinstance(ctype, ctypearray.W_CTypeArray): - return ctype.ctitem.unpack_list_of_float_items(self) + length = self.get_array_length() + with self as ptr: + return ctype.ctitem.unpack_list_of_float_items(ptr, length) return None @specialize.argtype(1) @@ -367,6 +371,25 @@ with self as ptr: return W_CDataGCP(self.space, ptr, self.ctype, self, w_destructor) + def unpack(self, length): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + space = self.space + if not self.ctype.is_nonfunc_pointer_or_array: + raise oefmt(space.w_TypeError, + "expected a pointer or array, got '%s'", + self.ctype.name) + if length < 0: + raise oefmt(space.w_ValueError, "'length' cannot be negative") + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + with self as ptr: + if not ptr: + raise oefmt(space.w_RuntimeError, + "cannot use unpack() on %s", + space.str_w(self.repr())) + w_result = ctype.ctitem.unpack_ptr(ctype, ptr, length) + return w_result + class W_CDataMem(W_CData): """This is used only by the results of cffi.cast('int', x) diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -7,11 +7,12 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import ovfcheck from pypy.module._cffi_backend import cdataobj from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray +from pypy.module._cffi_backend import ctypeprim class W_CTypeArray(W_CTypePtrOrArray): diff --git 
a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -49,10 +49,10 @@ def is_unichar_ptr_or_array(self): return False - def unpack_list_of_int_items(self, cdata): + def unpack_list_of_int_items(self, ptr, length): return None - def unpack_list_of_float_items(self, cdata): + def unpack_list_of_float_items(self, ptr, length): return None def pack_list_of_items(self, cdata, w_ob): @@ -127,6 +127,21 @@ raise oefmt(space.w_TypeError, "string(): unexpected cdata '%s' argument", self.name) + def unpack_ptr(self, w_ctypeptr, ptr, length): + # generic implementation, when the type of items is not known to + # be one for which a fast-case exists + space = self.space + itemsize = self.size + if itemsize < 0: + raise oefmt(space.w_ValueError, + "'%s' points to items of unknown size", + w_ctypeptr.name) + result_w = [None] * length + for i in range(length): + result_w[i] = self.convert_to_object(ptr) + ptr = rffi.ptradd(ptr, itemsize) + return space.newlist(result_w) + def add(self, cdata, i): space = self.space raise oefmt(space.w_TypeError, "cannot add a cdata '%s' and a number", diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -87,6 +87,13 @@ return self.space.wrapbytes(s) return W_CType.string(self, cdataobj, maxlen) + def unpack_ptr(self, w_ctypeptr, ptr, length): + result = self.unpack_list_of_int_items(ptr, length) + if result is not None: + return self.space.newlist_int(result) + return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length) + + class W_CTypePrimitiveCharOrUniChar(W_CTypePrimitive): _attrs_ = [] is_primitive_integer = True @@ -125,6 +132,10 @@ value = self._convert_to_char(w_ob) cdata[0] = value + def unpack_ptr(self, w_ctypeptr, ptr, length): + s = rffi.charpsize2str(ptr, length) + return 
self.space.wrapbytes(s) + # XXX explicitly use an integer type instead of lltype.UniChar here, # because for now the latter is defined as unsigned by RPython (even @@ -171,6 +182,10 @@ value = self._convert_to_unichar(w_ob) rffi.cast(rffi.CWCHARP, cdata)[0] = value + def unpack_ptr(self, w_ctypeptr, ptr, length): + u = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, ptr), length) + return self.space.wrap(u) + class W_CTypePrimitiveSigned(W_CTypePrimitive): _attrs_ = ['value_fits_long', 'value_smaller_than_long'] @@ -221,19 +236,16 @@ def write_raw_integer_data(self, w_cdata, value): w_cdata.write_raw_signed_data(value) - def unpack_list_of_int_items(self, w_cdata): + def unpack_list_of_int_items(self, ptr, length): if self.size == rffi.sizeof(rffi.LONG): from rpython.rlib.rrawarray import populate_list_from_raw_array res = [] - length = w_cdata.get_array_length() - with w_cdata as ptr: - buf = rffi.cast(rffi.LONGP, ptr) - populate_list_from_raw_array(res, buf, length) + buf = rffi.cast(rffi.LONGP, ptr) + populate_list_from_raw_array(res, buf, length) return res elif self.value_smaller_than_long: - res = [0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_list_from_raw_array(res, ptr, self.size) + res = [0] * length + misc.unpack_list_from_raw_array(res, ptr, self.size) return res return None @@ -313,11 +325,10 @@ def write_raw_integer_data(self, w_cdata, value): w_cdata.write_raw_unsigned_data(value) - def unpack_list_of_int_items(self, w_cdata): + def unpack_list_of_int_items(self, ptr, length): if self.value_fits_long: - res = [0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_unsigned_list_from_raw_array(res, ptr, self.size) + res = [0] * length + misc.unpack_unsigned_list_from_raw_array(res, ptr, self.size) return res return None @@ -391,19 +402,16 @@ value = space.float_w(space.float(w_ob)) misc.write_raw_float_data(cdata, value, self.size) - def unpack_list_of_float_items(self, w_cdata): + def 
unpack_list_of_float_items(self, ptr, length): if self.size == rffi.sizeof(rffi.DOUBLE): from rpython.rlib.rrawarray import populate_list_from_raw_array res = [] - length = w_cdata.get_array_length() - with w_cdata as ptr: - buf = rffi.cast(rffi.DOUBLEP, ptr) - populate_list_from_raw_array(res, buf, length) + buf = rffi.cast(rffi.DOUBLEP, ptr) + populate_list_from_raw_array(res, buf, length) return res elif self.size == rffi.sizeof(rffi.FLOAT): - res = [0.0] * w_cdata.get_array_length() - with w_cdata as ptr: - misc.unpack_cfloat_list_from_raw_array(res, ptr) + res = [0.0] * length + misc.unpack_cfloat_list_from_raw_array(res, ptr) return res return None @@ -421,6 +429,12 @@ return True return W_CTypePrimitive.pack_list_of_items(self, cdata, w_ob) + def unpack_ptr(self, w_ctypeptr, ptr, length): + result = self.unpack_list_of_float_items(ptr, length) + if result is not None: + return self.space.newlist_float(result) + return W_CType.unpack_ptr(self, w_ctypeptr, ptr, length) + class W_CTypePrimitiveLongDouble(W_CTypePrimitiveFloat): _attrs_ = [] diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -542,6 +542,25 @@ return w_cdata.ctype.string(w_cdata, maxlen) + @unwrap_spec(w_cdata=W_CData, length=int) + def descr_unpack(self, w_cdata, length): + """Unpack an array of C data of the given length, +returning a Python string/unicode/list. + +If 'cdata' is a pointer to 'char', returns a byte string. +It does not stop at the first null. This is equivalent to: +ffi.buffer(cdata, length)[:] + +If 'cdata' is a pointer to 'wchar_t', returns a unicode string. +'length' is measured in wchar_t's; it is not the size in bytes. + +If 'cdata' is a pointer to anything else, returns a list of +'length' items. 
This is a faster equivalent to: +[cdata[i] for i in range(length)]""" + # + return w_cdata.unpack(length) + + def descr_sizeof(self, w_arg): """\ Return the size in bytes of the argument. @@ -611,6 +630,38 @@ return w_result + def descr_list_types(self): + """\ +Returns the user type names known to this FFI instance. +This returns a tuple containing three lists of names: +(typedef_names, names_of_structs, names_of_unions)""" + # + space = self.space + ctx = self.ctxobj.ctx + + lst1_w = [] + for i in range(rffi.getintfield(ctx, 'c_num_typenames')): + s = rffi.charp2str(ctx.c_typenames[i].c_name) + lst1_w.append(space.wrap(s)) + + lst2_w = [] + lst3_w = [] + for i in range(rffi.getintfield(ctx, 'c_num_struct_unions')): + su = ctx.c_struct_unions[i] + if su.c_name[0] == '$': + continue + s = rffi.charp2str(su.c_name) + if rffi.getintfield(su, 'c_flags') & cffi_opcode.F_UNION: + lst_w = lst3_w + else: + lst_w = lst2_w + lst_w.append(space.wrap(s)) + + return space.newtuple([space.newlist(lst1_w), + space.newlist(lst2_w), + space.newlist(lst3_w)]) + + def descr_init_once(self, w_func, w_tag): """\ init_once(function, tag): run function() once. 
More precisely, @@ -731,6 +782,7 @@ getctype = interp2app(W_FFIObject.descr_getctype), init_once = interp2app(W_FFIObject.descr_init_once), integer_const = interp2app(W_FFIObject.descr_integer_const), + list_types = interp2app(W_FFIObject.descr_list_types), memmove = interp2app(W_FFIObject.descr_memmove), new = interp2app(W_FFIObject.descr_new), new_allocator = interp2app(W_FFIObject.descr_new_allocator), @@ -739,4 +791,5 @@ sizeof = interp2app(W_FFIObject.descr_sizeof), string = interp2app(W_FFIObject.descr_string), typeof = interp2app(W_FFIObject.descr_typeof), + unpack = interp2app(W_FFIObject.descr_unpack), **_extras) diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -78,6 +78,12 @@ # ____________________________________________________________ + at unwrap_spec(w_cdata=cdataobj.W_CData, length=int) +def unpack(space, w_cdata, length): + return w_cdata.unpack(length) + +# ____________________________________________________________ + def _get_types(space): return space.newtuple([space.gettypefor(cdataobj.W_CData), space.gettypefor(ctypeobj.W_CType)]) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.5.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.6.0", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): @@ -3514,3 +3514,72 @@ d = {} _get_common_types(d) assert d['bool'] == '_Bool' + +def test_unpack(): + BChar = new_primitive_type("char") + BArray = new_array_type(new_pointer_type(BChar), 10) # char[10] + p = newp(BArray, 
b"abc\x00def") + p0 = p + assert unpack(p, 10) == b"abc\x00def\x00\x00\x00" + assert unpack(p+1, 5) == b"bc\x00de" + BWChar = new_primitive_type("wchar_t") + BArray = new_array_type(new_pointer_type(BWChar), 10) # wchar_t[10] + p = newp(BArray, u"abc\x00def") + assert unpack(p, 10) == u"abc\x00def\x00\x00\x00" + + for typename, samples in [ + ("uint8_t", [0, 2**8-1]), + ("uint16_t", [0, 2**16-1]), + ("uint32_t", [0, 2**32-1]), + ("uint64_t", [0, 2**64-1]), + ("int8_t", [-2**7, 2**7-1]), + ("int16_t", [-2**15, 2**15-1]), + ("int32_t", [-2**31, 2**31-1]), + ("int64_t", [-2**63, 2**63-1]), + ("_Bool", [0, 1]), + ("float", [0.0, 10.5]), + ("double", [12.34, 56.78]), + ]: + BItem = new_primitive_type(typename) + BArray = new_array_type(new_pointer_type(BItem), 10) + p = newp(BArray, samples) + result = unpack(p, len(samples)) + assert result == samples + for i in range(len(samples)): + assert result[i] == p[i] and type(result[i]) is type(p[i]) + # + BInt = new_primitive_type("int") + py.test.raises(TypeError, unpack, p) + py.test.raises(TypeError, unpack, b"foobar", 6) + py.test.raises(TypeError, unpack, cast(BInt, 42), 1) + # + BPtr = new_pointer_type(BInt) + random_ptr = cast(BPtr, -424344) + other_ptr = cast(BPtr, 54321) + BArray = new_array_type(new_pointer_type(BPtr), None) + lst = unpack(newp(BArray, [random_ptr, other_ptr]), 2) + assert lst == [random_ptr, other_ptr] + # + BFunc = new_function_type((BInt, BInt), BInt, False) + BFuncPtr = new_pointer_type(BFunc) + lst = unpack(newp(new_array_type(BFuncPtr, None), 2), 2) + assert len(lst) == 2 + assert not lst[0] and not lst[1] + assert typeof(lst[0]) is BFunc + # + BStruct = new_struct_type("foo") + BStructPtr = new_pointer_type(BStruct) + e = py.test.raises(ValueError, unpack, cast(BStructPtr, 42), 5) + assert str(e.value) == "'foo *' points to items of unknown size" + complete_struct_or_union(BStruct, [('a1', BInt, -1), + ('a2', BInt, -1)]) + array_of_structs = newp(new_array_type(BStructPtr, None), [[4,5], 
[6,7]]) + lst = unpack(array_of_structs, 2) + assert typeof(lst[0]) is BStruct + assert lst[0].a1 == 4 and lst[1].a2 == 7 + # + py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 0) + py.test.raises(RuntimeError, unpack, cast(new_pointer_type(BChar), 0), 10) + # + py.test.raises(ValueError, unpack, p0, -1) + py.test.raises(ValueError, unpack, p, -1) diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -476,3 +476,11 @@ for i in range(5): raises(ValueError, ffi.init_once, do_init, "tag") assert seen == [1] * (i + 1) + + def test_unpack(self): + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + p = ffi.new("char[]", b"abc\x00def") + assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" + p = ffi.new("int[]", [-123456789]) + assert ffi.unpack(p, 1) == [-123456789] diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -1626,3 +1626,150 @@ # a case where 'onerror' is not callable raises(TypeError, ffi.def_extern(name='bar', onerror=42), lambda x: x) + + def test_extern_python_stdcall(self): + ffi, lib = self.prepare(""" + extern "Python" int __stdcall foo(int); + extern "Python" int WINAPI bar(int); + int (__stdcall * mycb1)(int); + int indirect_call(int); + """, 'test_extern_python_stdcall', """ + #ifndef _MSC_VER + # define __stdcall + #endif + static int (__stdcall * mycb1)(int); + static int indirect_call(int x) { + return mycb1(x); + } + """) + # + @ffi.def_extern() + def foo(x): + return x + 42 + @ffi.def_extern() + def bar(x): + return x + 43 + assert lib.foo(100) == 142 + assert lib.bar(100) == 143 + lib.mycb1 = lib.foo + assert lib.mycb1(200) == 242 + assert lib.indirect_call(300) == 342 + + def 
test_introspect_function(self): + ffi, lib = self.prepare(""" + float f1(double); + """, 'test_introspect_function', """ + float f1(double x) { return x; } + """) + assert dir(lib) == ['f1'] + FUNC = ffi.typeof(lib.f1) + assert FUNC.kind == 'function' + assert FUNC.args[0].cname == 'double' + assert FUNC.result.cname == 'float' + assert ffi.typeof(ffi.addressof(lib, 'f1')) is FUNC + + def test_introspect_global_var(self): + ffi, lib = self.prepare(""" + float g1; + """, 'test_introspect_global_var', """ + float g1; + """) + assert dir(lib) == ['g1'] + FLOATPTR = ffi.typeof(ffi.addressof(lib, 'g1')) + assert FLOATPTR.kind == 'pointer' + assert FLOATPTR.item.cname == 'float' + + def test_introspect_global_var_array(self): + ffi, lib = self.prepare(""" + float g1[100]; + """, 'test_introspect_global_var_array', """ + float g1[100]; + """) + assert dir(lib) == ['g1'] + FLOATARRAYPTR = ffi.typeof(ffi.addressof(lib, 'g1')) + assert FLOATARRAYPTR.kind == 'pointer' + assert FLOATARRAYPTR.item.kind == 'array' + assert FLOATARRAYPTR.item.length == 100 + assert ffi.typeof(lib.g1) is FLOATARRAYPTR.item + + def test_introspect_integer_const(self): + ffi, lib = self.prepare("#define FOO 42", + 'test_introspect_integer_const', """ + #define FOO 42 + """) + assert dir(lib) == ['FOO'] + assert lib.FOO == ffi.integer_const('FOO') == 42 + + def test_introspect_typedef(self): + ffi, lib = self.prepare("typedef int foo_t;", + 'test_introspect_typedef', """ + typedef int foo_t; + """) + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'primitive' + assert ffi.typeof('foo_t').cname == 'int' + + def test_introspect_typedef_multiple(self): + ffi, lib = self.prepare(""" + typedef signed char a_t, c_t, g_t, b_t; + """, 'test_introspect_typedef_multiple', """ + typedef signed char a_t, c_t, g_t, b_t; + """) + assert ffi.list_types() == (['a_t', 'b_t', 'c_t', 'g_t'], [], []) + + def test_introspect_struct(self): + ffi, lib = self.prepare(""" + struct foo_s { 
int a; }; + """, 'test_introspect_struct', """ + struct foo_s { int a; }; + """) + assert ffi.list_types() == ([], ['foo_s'], []) + assert ffi.typeof('struct foo_s').kind == 'struct' + assert ffi.typeof('struct foo_s').cname == 'struct foo_s' + + def test_introspect_union(self): + ffi, lib = self.prepare(""" + union foo_s { int a; }; + """, 'test_introspect_union', """ + union foo_s { int a; }; + """) + assert ffi.list_types() == ([], [], ['foo_s']) + assert ffi.typeof('union foo_s').kind == 'union' + assert ffi.typeof('union foo_s').cname == 'union foo_s' + + def test_introspect_struct_and_typedef(self): + ffi, lib = self.prepare(""" + typedef struct { int a; } foo_t; + """, 'test_introspect_struct_and_typedef', """ + typedef struct { int a; } foo_t; + """) + assert ffi.list_types() == (['foo_t'], [], []) + assert ffi.typeof('foo_t').kind == 'struct' + assert ffi.typeof('foo_t').cname == 'foo_t' + + def test_introspect_included_type(self): + SOURCE = """ + typedef signed char schar_t; + struct sint_t { int x; }; + """ + ffi1, lib1 = self.prepare(SOURCE, + "test_introspect_included_type_parent", SOURCE) + ffi2, lib2 = self.prepare("", + "test_introspect_included_type", SOURCE, + includes=[ffi1]) + assert ffi1.list_types() == ffi2.list_types() == ( + ['schar_t'], ['sint_t'], []) + + def test_introspect_order(self): + ffi, lib = self.prepare(""" + union aaa { int a; }; typedef struct ccc { int a; } b; + union g { int a; }; typedef struct cc { int a; } bbb; + union aa { int a; }; typedef struct a { int a; } bb; + """, "test_introspect_order", """ + union aaa { int a; }; typedef struct ccc { int a; } b; + union g { int a; }; typedef struct cc { int a; } bbb; + union aa { int a; }; typedef struct a { int a; } bb; + """) + assert ffi.list_types() == (['b', 'bb', 'bbb'], + ['a', 'cc', 'ccc'], + ['aa', 'aaa', 'g']) diff --git a/pypy/module/_multiprocessing/test/test_win32.py b/pypy/module/_multiprocessing/test/test_win32.py --- 
a/pypy/module/_multiprocessing/test/test_win32.py +++ b/pypy/module/_multiprocessing/test/test_win32.py @@ -2,7 +2,8 @@ import sys class AppTestWin32: - spaceconfig = dict(usemodules=('_multiprocessing',)) + spaceconfig = dict(usemodules=('_multiprocessing', + 'signal', '_rawffi', 'binascii')) def setup_class(cls): if sys.platform != "win32": diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -758,6 +758,7 @@ try: while 1: count += cli.send(b'foobar' * 70) + assert count < 100000 except timeout: pass t.recv(count) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -593,14 +593,6 @@ self.w_class = None self.method = method - if self.canoverflow: - assert self.bytes <= rffi.sizeof(rffi.ULONG) - if self.bytes == rffi.sizeof(rffi.ULONG) and not signed and \ - self.unwrap == 'int_w': - # Treat this type as a ULONG - self.unwrap = 'bigint_w' - self.canoverflow = False - def _freeze_(self): # hint for the annotator: track individual constant instances return True @@ -609,6 +601,14 @@ return self.unwrap == 'int_w' or self.unwrap == 'bigint_w' +if rffi.sizeof(rffi.UINT) == rffi.sizeof(rffi.ULONG): + # 32 bits: UINT can't safely overflow into a C long (rpython int) + # via int_w, handle it like ULONG below + _UINTTypeCode = \ + TypeCode(rffi.UINT, 'bigint_w.touint') +else: + _UINTTypeCode = \ + TypeCode(rffi.UINT, 'int_w', True) types = { 'u': TypeCode(lltype.UniChar, 'unicode_w', method=''), 'b': TypeCode(rffi.SIGNEDCHAR, 'int_w', True, True), @@ -616,7 +616,7 @@ 'h': TypeCode(rffi.SHORT, 'int_w', True, True), 'H': TypeCode(rffi.USHORT, 'int_w', True), 'i': TypeCode(rffi.INT, 'int_w', True, True), - 'I': TypeCode(rffi.UINT, 'int_w', True), + 'I': _UINTTypeCode, 'l': TypeCode(rffi.LONG, 'int_w', True, True), 'L': 
TypeCode(rffi.ULONG, 'bigint_w.touint'), 'q': TypeCode(rffi.LONGLONG, 'bigint_w.tolonglong', True, True), diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -77,14 +77,15 @@ except OverflowError: pass - for tc in 'BHIL': + for tc in 'BHILQ': a = self.array(tc) - vals = [0, 2 ** a.itemsize - 1] + itembits = a.itemsize * 8 + vals = [0, 2 ** itembits - 1] a.fromlist(vals) assert a.tolist() == vals a = self.array(tc.lower()) - vals = [-1 * (2 ** a.itemsize) // 2, (2 ** a.itemsize) // 2 - 1] + vals = [-1 * (2 ** itembits) // 2, (2 ** itembits) // 2 - 1] a.fromlist(vals) assert a.tolist() == vals diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -144,26 +144,14 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir, copy_numpy_headers): +def copy_header_files(dstdir): # XXX: 20 lines of code to recursively copy a directory, really?? 
assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') - for name in ("pypy_decl.h", "pypy_macros.h", "pypy_structmember_decl.h"): + for name in ["pypy_macros.h"] + FUNCTIONS_BY_HEADER.keys(): headers.append(udir.join(name)) _copy_header_files(headers, dstdir) - if copy_numpy_headers: - try: - dstdir.mkdir('numpy') - except py.error.EEXIST: - pass - numpy_dstdir = dstdir / 'numpy' - - numpy_include_dir = include_dir / 'numpy' - numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') - _copy_header_files(numpy_headers, numpy_dstdir) - - class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -234,7 +222,8 @@ wrapper.c_name = cpyext_namespace.uniquename(self.c_name) return wrapper -def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header='pypy_decl.h', +DEFAULT_HEADER = 'pypy_decl.h' +def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER, gil=None, result_borrowed=False): """ Declares a function to be exported. @@ -268,6 +257,8 @@ func_name = func.func_name if header is not None: c_name = None + assert func_name not in FUNCTIONS, ( + "%s already registered" % func_name) else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, @@ -275,10 +266,6 @@ result_borrowed=result_borrowed) func.api_func = api_function - if header is not None: - assert func_name not in FUNCTIONS, ( - "%s already registered" % func_name) - if error is _NOT_SPECIFIED: raise ValueError("function %s has no return value for exceptions" % func) @@ -366,7 +353,8 @@ unwrapper_catch = make_unwrapper(True) unwrapper_raise = make_unwrapper(False) if header is not None: - FUNCTIONS[func_name] = api_function + if header == DEFAULT_HEADER: + FUNCTIONS[func_name] = api_function FUNCTIONS_BY_HEADER.setdefault(header, {})[func_name] = api_function INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests return unwrapper_raise # used in 'normal' RPython code. 
@@ -794,10 +782,11 @@ # Structure declaration code members = [] structindex = {} - for name, func in sorted(FUNCTIONS.iteritems()): - restype, args = c_function_signature(db, func) - members.append('%s (*%s)(%s);' % (restype, name, args)) - structindex[name] = len(structindex) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + restype, args = c_function_signature(db, func) + members.append('%s (*%s)(%s);' % (restype, name, args)) + structindex[name] = len(structindex) structmembers = '\n'.join(members) struct_declaration_code = """\ struct PyPyAPI { @@ -806,7 +795,8 @@ RPY_EXTERN struct PyPyAPI* pypyAPI = &_pypyAPI; """ % dict(members=structmembers) - functions = generate_decls_and_callbacks(db, export_symbols) + functions = generate_decls_and_callbacks(db, export_symbols, + prefix='cpyexttest') global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): @@ -823,6 +813,11 @@ prologue = ("#include \n" "#include \n" "#include \n") + if use_micronumpy: + prologue = ("#include \n" + "#include \n" + "#include \n" + "#include \n") code = (prologue + struct_declaration_code + global_code + @@ -898,13 +893,19 @@ pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') # implement structure initialization code - for name, func in FUNCTIONS.iteritems(): - if name.startswith('cpyext_'): # XXX hack - continue - pypyAPI[structindex[name]] = ctypes.cast( - ll2ctypes.lltype2ctypes(func.get_llhelper(space)), - ctypes.c_void_p) - + #for name, func in FUNCTIONS.iteritems(): + # if name.startswith('cpyext_'): # XXX hack + # continue + # pypyAPI[structindex[name]] = ctypes.cast( + # ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + # ctypes.c_void_p) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + if name.startswith('cpyext_'): # XXX hack + continue + pypyAPI[structindex[name]] = ctypes.cast( + 
ll2ctypes.lltype2ctypes(func.get_llhelper(space)), + ctypes.c_void_p) setup_va_functions(eci) setup_init_functions(eci, translating=False) @@ -997,18 +998,12 @@ pypy_macros_h = udir.join('pypy_macros.h') pypy_macros_h.write('\n'.join(pypy_macros)) -def generate_decls_and_callbacks(db, export_symbols, api_struct=True): +def generate_decls_and_callbacks(db, export_symbols, api_struct=True, prefix=''): "NOT_RPYTHON" # implement function callbacks and generate function decls functions = [] decls = {} pypy_decls = decls['pypy_decl.h'] = [] - pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") - pypy_decls.append("#ifndef PYPY_STANDALONE\n") - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("extern \"C\" {") - pypy_decls.append("#endif\n") pypy_decls.append('#define Signed long /* xxx temporary fix */\n') pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') @@ -1018,19 +1013,28 @@ for header_name, header_functions in FUNCTIONS_BY_HEADER.iteritems(): if header_name not in decls: header = decls[header_name] = [] + header.append('#define Signed long /* xxx temporary fix */\n') + header.append('#define Unsigned unsigned long /* xxx temporary fix */\n') else: header = decls[header_name] for name, func in sorted(header_functions.iteritems()): + if header == DEFAULT_HEADER: + _name = name + else: + # this name is not included in pypy_macros.h + _name = mangle_name(prefix, name) + assert _name is not None, 'error converting %s' % name + header.append("#define %s %s" % (name, _name)) restype, args = c_function_signature(db, func) - header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, name, args)) + header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, _name, args)) if api_struct: callargs = ', '.join('arg%d' % (i,) for i in range(len(func.argtypes))) if func.restype is lltype.Void: - body = "{ _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ _pypyAPI.%s(%s); }" % (_name, callargs) else: - body = "{ 
return _pypyAPI.%s(%s); }" % (name, callargs) + body = "{ return _pypyAPI.%s(%s); }" % (_name, callargs) functions.append('%s %s(%s)\n%s' % (restype, name, args, body)) for name in VA_TP_LIST: name_no_star = process_va_name(name) @@ -1047,13 +1051,10 @@ typ = 'PyObject*' pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) - pypy_decls.append('#undef Signed /* xxx temporary fix */\n') - pypy_decls.append('#undef Unsigned /* xxx temporary fix */\n') - pypy_decls.append("#ifdef __cplusplus") - pypy_decls.append("}") - pypy_decls.append("#endif") - pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") - pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") + for header_name in FUNCTIONS_BY_HEADER.keys(): + header = decls[header_name] + header.append('#undef Signed /* xxx temporary fix */\n') + header.append('#undef Unsigned /* xxx temporary fix */\n') for header_name, header_decls in decls.iteritems(): decl_h = udir.join(header_name) @@ -1162,7 +1163,8 @@ generate_macros(export_symbols, prefix='PyPy') - functions = generate_decls_and_callbacks(db, [], api_struct=False) + functions = generate_decls_and_callbacks(db, [], api_struct=False, + prefix='PyPy') code = "#include \n" + "\n".join(functions) eci = build_eci(False, export_symbols, code) @@ -1204,14 +1206,16 @@ PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **', getter_only=True, declare_as_extern=False) - for name, func in FUNCTIONS.iteritems(): - newname = mangle_name('PyPy', name) or name - deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) - deco(func.get_wrapper(space)) + for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + for name, func in header_functions.iteritems(): + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, + relax=True) + deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include, use_micronumpy) + 
copy_header_files(trunk_include) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -139,7 +139,18 @@ /* Missing definitions */ #include "missing.h" -#include +/* The declarations of most API functions are generated in a separate file */ +/* Don't include them while building PyPy, RPython also generated signatures + * which are similar but not identical. */ +#ifndef PYPY_STANDALONE +#ifdef __cplusplus +extern "C" { +#endif + #include +#ifdef __cplusplus +} +#endif +#endif /* PYPY_STANDALONE */ /* Define macros for inline documentation. */ #define PyDoc_VAR(name) static char name[] diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h deleted file mode 100644 --- a/pypy/module/cpyext/include/numpy/__multiarray_api.h +++ /dev/null @@ -1,10 +0,0 @@ - - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - -#define import_array() -#define PyArray_New _PyArray_New - diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -1,8 +1,6 @@ -/* NDArray object interface - S. H. Muller, 2013/07/26 - * It will be copied by numpy/core/setup.py by install_data to - * site-packages/numpy/core/includes/numpy -*/ +/* NDArray object interface - S. H. 
Muller, 2013/07/26 */ +/* For testing ndarrayobject only */ #ifndef Py_NDARRAYOBJECT_H #define Py_NDARRAYOBJECT_H @@ -10,13 +8,8 @@ extern "C" { #endif -#include "old_defines.h" #include "npy_common.h" -#include "__multiarray_api.h" - -#define NPY_UNUSED(x) x -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#include "ndarraytypes.h" /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -24,208 +17,20 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_CopyInto _PyArray_CopyInto +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define NPY_MAXDIMS 32 -#ifndef NDARRAYTYPES_H -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -/* data types copied from numpy/ndarraytypes.h - * keep numbers in sync with micronumpy.interp_dtype.DTypeCache - */ -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_OBJECT=17, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - /* - * New 1.6 types appended, may be integrated - * into the above in 2.0. 
- */ - NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, - - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR, /* special flag */ - NPY_USERDEF=256, /* leave room for characters */ - - /* The number of types not including the new 1.6 types */ - NPY_NTYPES_ABI_COMPATIBLE=21 -}; - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) -#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) || \ - ((type) == NPY_HALF)) -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) -#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) -#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) -#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) - - -/* flags */ -#define NPY_ARRAY_C_CONTIGUOUS 0x0001 -#define NPY_ARRAY_F_CONTIGUOUS 0x0002 -#define NPY_ARRAY_OWNDATA 0x0004 -#define NPY_ARRAY_FORCECAST 0x0010 -#define NPY_ARRAY_ENSURECOPY 0x0020 -#define NPY_ARRAY_ENSUREARRAY 0x0040 -#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 -#define NPY_ARRAY_ALIGNED 0x0100 -#define NPY_ARRAY_NOTSWAPPED 0x0200 -#define NPY_ARRAY_WRITEABLE 0x0400 -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 - -#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE) -#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ - NPY_ARRAY_WRITEABLE | \ - NPY_ARRAY_NOTSWAPPED) -#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_BEHAVED) -#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) -#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) -#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) -#define NPY_ARRAY_INOUT_ARRAY 
(NPY_ARRAY_CARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) -#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) -#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) -#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ - NPY_ARRAY_UPDATEIFCOPY) - -#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ - NPY_ARRAY_F_CONTIGUOUS | \ - NPY_ARRAY_ALIGNED) - -#define NPY_FARRAY NPY_ARRAY_FARRAY -#define NPY_CARRAY NPY_ARRAY_CARRAY - -#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) - -#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) -#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) - -#define PyArray_ISONESEGMENT(arr) (1) -#define PyArray_ISNOTSWAPPED(arr) (1) -#define PyArray_ISBYTESWAPPED(arr) (0) - -#endif - -#define NPY_INT8 NPY_BYTE -#define NPY_UINT8 NPY_UBYTE -#define NPY_INT16 NPY_SHORT -#define NPY_UINT16 NPY_USHORT -#define NPY_INT32 NPY_INT -#define NPY_UINT32 NPY_UINT -#define NPY_INT64 NPY_LONG -#define NPY_UINT64 NPY_ULONG -#define NPY_FLOAT32 NPY_FLOAT -#define NPY_FLOAT64 NPY_DOUBLE -#define NPY_COMPLEX32 NPY_CFLOAT -#define NPY_COMPLEX64 NPY_CDOUBLE - - -/* functions */ -#ifndef PyArray_NDIM - -#define PyArray_Check _PyArray_Check -#define PyArray_CheckExact 
_PyArray_CheckExact -#define PyArray_FLAGS _PyArray_FLAGS - -#define PyArray_NDIM _PyArray_NDIM -#define PyArray_DIM _PyArray_DIM -#define PyArray_STRIDE _PyArray_STRIDE -#define PyArray_SIZE _PyArray_SIZE -#define PyArray_ITEMSIZE _PyArray_ITEMSIZE -#define PyArray_NBYTES _PyArray_NBYTES -#define PyArray_TYPE _PyArray_TYPE -#define PyArray_DATA _PyArray_DATA - -#define PyArray_Size PyArray_SIZE -#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) - -#define PyArray_FromAny _PyArray_FromAny -#define PyArray_FromObject _PyArray_FromObject -#define PyArray_ContiguousFromObject PyArray_FromObject -#define PyArray_ContiguousFromAny PyArray_FromObject - -#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) -#define PyArray_FROM_OTF(obj, typenum, requirements) \ - PyArray_FromObject(obj, typenum, 0, 0) - -#define PyArray_New _PyArray_New -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData -#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning - -#define PyArray_EMPTY(nd, dims, type_num, fortran) \ - PyArray_SimpleNew(nd, dims, type_num) +/* functions defined in ndarrayobject.c*/ PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); -#define PyArray_FILLWBYTE _PyArray_FILLWBYTE -#define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto -#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) - -/* Don't use these in loops! 
*/ - -#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0))) - -#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1))) - -#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ - (i)*PyArray_STRIDE(obj,0) + \ - (j)*PyArray_STRIDE(obj,1) + \ From pypy.commits at gmail.com Tue Apr 19 15:23:11 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 19 Apr 2016 12:23:11 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: (pjenvey, ronan) Query the buffer protocol in space.fsencode_w, not Message-ID: <5716859f.865a1c0a.63b8.fffff74e@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83776:ad52aafa4b6e Date: 2016-04-19 20:22 +0100 http://bitbucket.org/pypy/pypy/changeset/ad52aafa4b6e/ Log: (pjenvey, ronan) Query the buffer protocol in space.fsencode_w, not space.bytes_w, and ensure that this is used everywhere in interp_posix.py. reverts 4b64950d0558898460fd9c7c126c085c1a6bdbfe diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -231,8 +231,7 @@ raise BufferInterfaceNotFound def bytes_w(self, space): - buffer = space.buffer_w(self, space.BUF_FULL_RO) - return buffer.as_str() + self._typed_unwrap_error(space, "bytes") def unicode_w(self, space): self._typed_unwrap_error(space, "string") @@ -1639,9 +1638,14 @@ return fsdecode(space, w_obj) def fsencode_w(self, w_obj): + from rpython.rlib import rstring if self.isinstance_w(w_obj, self.w_unicode): w_obj = self.fsencode(w_obj) - return self.bytes0_w(w_obj) + result = self.bufferstr_w(w_obj, self.BUF_FULL_RO) + if '\x00' in result: + raise oefmt(self.w_TypeError, + "argument must be a string without NUL characters") + return rstring.assert_str0(result) def fsdecode_w(self, w_obj): if self.isinstance_w(w_obj, self.w_bytes): diff --git 
a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -64,7 +64,7 @@ self.w_obj = w_obj def as_bytes(self): - return self.space.bytes0_w(self.w_obj) + return self.space.fsencode_w(self.w_obj) def as_unicode(self): return self.space.fsdecode_w(self.w_obj) @@ -83,7 +83,7 @@ fname = FileEncoder(space, w_fname) return func(fname, *args) else: - fname = space.bytes0_w(w_fname) + fname = space.fsencode_w(w_fname) return func(fname, *args) return dispatch From pypy.commits at gmail.com Tue Apr 19 16:47:46 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 19 Apr 2016 13:47:46 -0700 (PDT) Subject: [pypy-commit] buildbot default: try to fix 11de30c674f6 Message-ID: <57169972.96811c0a.445a8.19f2@mx.google.com> Author: mattip Branch: Changeset: r1000:575eedefb087 Date: 2016-04-19 23:47 +0300 http://bitbucket.org/pypy/buildbot/changeset/575eedefb087/ Log: try to fix 11de30c674f6 diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -702,9 +702,9 @@ workdir='.')) self.addStep(ShellCmd( description="copy ctypes resource cache", - # the || : ensures this always succeeds, eventually remove this step - command=['cp', '-rv', 'pypy-c/lib_pypy/ctypes_config_cache', 'build/lib_pypy', '||', ':'], - haltOnFailure=True, + # eventually remove this step, not needed after 5.1 + command=['cp', '-rv', 'pypy-c/lib_pypy/ctypes_config_cache', 'build/lib_pypy'], + haltOnFailure=False, workdir='.')) self.addStep(ShellCmd( description="copy cffi import libraries", From pypy.commits at gmail.com Tue Apr 19 17:14:22 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 19 Apr 2016 14:14:22 -0700 (PDT) Subject: [pypy-commit] pypy default: update documentation for 5.1 release Message-ID: <57169fae.06d8c20a.4479a.ffffcb1c@mx.google.com> Author: mattip Branch: Changeset: r83777:00817db13cc3 Date: 2016-04-20 
00:13 +0300 http://bitbucket.org/pypy/pypy/changeset/00817db13cc3/ Log: update documentation for 5.1 release diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -6,7 +6,7 @@ We encourage all users of PyPy to update to this version. Apart from the usual bug fixes, there is an ongoing effort to improve the warmup time and memory usage of JIT-related metadata, and we now fully support the IBM s390x -architecture. +architecture. We also updated cffi_ to 1.6 You can download the PyPy 5.1 release here: @@ -26,6 +26,7 @@ .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html .. _`numpy`: https://bitbucket.org/pypy/numpy +.. _cffi: https://cffi.readthedocs.org What is PyPy? ============= @@ -92,6 +93,15 @@ * Fix sandbox startup (a regression in 5.0) + * Fix possible segfault for classes with mangled mro or __metaclass__ + + * Fix isinstance(deque(), Hashable) on the pure python deque + + * Fix an issue with forkpty() + + * Fix issue with GIL and C-API multithreading, we care if PyPyGILState_Ensure + is called before initializing the GIL where cpython allows unsafe behaviour + * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy @@ -108,6 +118,8 @@ functions exported from libpypy.so are declared in pypy_numpy.h, which is included only when building our fork of numpy + * Add broadcast + * Performance improvements: * Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting @@ -119,12 +131,16 @@ * Remove the forced minor collection that occurs when rewriting the assembler at the start of the JIT backend + * Port the resource module to cffi + * Internal refactorings: * Use a simpler logger to speed up translation * Drop vestiges of Python 2.5 support in 
testing + * Update rpython functions with ones needed for py3k + .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html .. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst --- a/pypy/doc/whatsnew-5.1.0.rst +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -60,3 +60,13 @@ Remove old uneeded numpy headers, what is left is only for testing. Also generate pypy_numpy.h which exposes functions to directly use micronumpy ndarray and ufuncs + +.. branch: rposix-for-3 + +Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). +This updates the underlying rpython functions with the ones needed for the +py3k branch + +.. branch: numpy_broadcast + +Add broadcast to micronumpy diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,14 +3,5 @@ ========================= .. this is a revision shortly after release-5.1 -.. startrev: 2180e1eaf6f6 +.. startrev: aa60332382a1 -.. branch: rposix-for-3 - -Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). -This updates the underlying rpython functions with the ones needed for the -py3k branch - -.. 
branch: numpy_broadcast - -Add broadcast to micronumpy From pypy.commits at gmail.com Tue Apr 19 17:16:00 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 19 Apr 2016 14:16:00 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: try harder to remove ndarray macros and decls by adding the header name after # for all GLOBALS Message-ID: <5716a010.972e1c0a.99515.1f6c@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83778:932d99f41a9a Date: 2016-04-19 23:32 +0300 http://bitbucket.org/pypy/pypy/changeset/932d99f41a9a/ Log: try harder to remove ndarray macros and decls by adding the header name after # for all GLOBALS diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -494,11 +494,11 @@ ] TYPES = {} GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur - '_Py_NoneStruct#': ('PyObject*', 'space.w_None'), - '_Py_TrueStruct#': ('PyIntObject*', 'space.w_True'), - '_Py_ZeroStruct#': ('PyIntObject*', 'space.w_False'), - '_Py_NotImplementedStruct#': ('PyObject*', 'space.w_NotImplemented'), - '_Py_EllipsisObject#': ('PyObject*', 'space.w_Ellipsis'), + '_Py_NoneStruct#%s' % pypy_decl: ('PyObject*', 'space.w_None'), + '_Py_TrueStruct#%s' % pypy_decl: ('PyIntObject*', 'space.w_True'), + '_Py_ZeroStruct#%s' % pypy_decl: ('PyIntObject*', 'space.w_False'), + '_Py_NotImplementedStruct#%s' % pypy_decl: ('PyObject*', 'space.w_NotImplemented'), + '_Py_EllipsisObject#%s' % pypy_decl: ('PyObject*', 'space.w_Ellipsis'), 'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'), } FORWARD_DECLS = [] @@ -548,7 +548,7 @@ 'PyCFunction_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)', 'PyWrapperDescr_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)' }.items(): - GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) + GLOBALS['%s#%s' % (cpyname, pypy_decl)] = ('PyTypeObject*', pypyexpr) for cpyname in '''PyMethodObject 
PyListObject PyLongObject PyDictObject PyClassObject'''.split(): @@ -887,6 +887,9 @@ structindex = {} for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): + if not func: + # added only for the macro, not the decl + continue restype, args = c_function_signature(db, func) members.append('%s (*%s)(%s);' % (restype, name, args)) structindex[name] = len(structindex) @@ -903,7 +906,7 @@ global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): - if "#" in name: + if '#' in name: continue if typ == 'PyDateTime_CAPI*': continue @@ -927,7 +930,7 @@ '\n' + '\n'.join(functions)) - eci = build_eci(True, export_symbols, code) + eci = build_eci(True, export_symbols, code, use_micronumpy) eci = eci.compile_shared_lib( outputfilename=str(udir / "module_cache" / "pypyapi")) modulename = py.path.local(eci.libraries[-1]) @@ -958,8 +961,8 @@ for name, (typ, expr) in GLOBALS.iteritems(): from pypy.module import cpyext # for the eval() below w_obj = eval(expr) - if name.endswith('#'): - name = name[:-1] + if '#' in name: + name = name.split('#')[0] isptr = False else: isptr = True @@ -1004,7 +1007,7 @@ # ctypes.c_void_p) for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): - if name.startswith('cpyext_'): # XXX hack + if name.startswith('cpyext_') or func is None: # XXX hack continue pypyAPI[structindex[name]] = ctypes.cast( ll2ctypes.lltype2ctypes(func.get_llhelper(space)), @@ -1076,10 +1079,14 @@ pypy_macros = [] renamed_symbols = [] for name in export_symbols: - name = name.replace("#", "") + if '#' in name: + name,header = name.split('#') + else: + header = pypy_decl newname = mangle_name(prefix, name) assert newname, name - pypy_macros.append('#define %s %s' % (name, newname)) + if header == pypy_decl: + pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) 
renamed_symbols.append(newname) @@ -1124,6 +1131,8 @@ header = decls[header_name] for name, func in sorted(header_functions.iteritems()): + if not func: + continue if header == DEFAULT_HEADER: _name = name else: @@ -1149,12 +1158,15 @@ functions.append(header + '\n{return va_arg(*vp, %s);}\n' % name) for name, (typ, expr) in GLOBALS.iteritems(): - if name.endswith('#'): - name = name.replace("#", "") + if '#' in name: + name, header = name.split("#") typ = typ.replace("*", "") elif name.startswith('PyExc_'): typ = 'PyObject*' - pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) + header = pypy_decl + if header != pypy_decl: + decls[header].append('#define %s %s' % (name, mangle_name(prefix, name))) + decls[header].append('PyAPI_DATA(%s) %s;' % (typ, name)) for header_name in FUNCTIONS_BY_HEADER.keys(): header = decls[header_name] @@ -1185,7 +1197,7 @@ source_dir / "pymem.c", ] -def build_eci(building_bridge, export_symbols, code): +def build_eci(building_bridge, export_symbols, code, use_micronumpy=False): "NOT_RPYTHON" # Build code and get pointer to the structure kwds = {} @@ -1207,9 +1219,11 @@ # Generate definitions for global structures structs = ["#include "] + if use_micronumpy: + structs.append('#include ') for name, (typ, expr) in GLOBALS.iteritems(): - if name.endswith('#'): - structs.append('%s %s;' % (typ[:-1], name[:-1])) + if '#' in name: + structs.append('%s %s;' % (typ[:-1], name.split('#')[0])) elif name.startswith('PyExc_'): structs.append('PyTypeObject _%s;' % (name,)) structs.append('PyObject* %s = (PyObject*)&_%s;' % (name, name)) @@ -1250,11 +1264,12 @@ use_micronumpy = space.config.objspace.usemodules.micronumpy if not use_micronumpy: return use_micronumpy - # import to register api functions by side-effect - import pypy.module.cpyext.ndarrayobject - global GLOBALS, SYMBOLS_C, separate_module_files - GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") - SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', 
'_PyArray_ZEROS'] + # import registers api functions by side-effect, we also need HEADER + from pypy.module.cpyext.ndarrayobject import HEADER + global GLOBALS, FUNCTIONS_BY_HEADER, separate_module_files + for func_name in ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS']: + FUNCTIONS_BY_HEADER.setdefault(HEADER, {})[func_name] = None + GLOBALS["PyArray_Type#%s" % HEADER] = ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") separate_module_files.append(source_dir / "ndarrayobject.c") return use_micronumpy @@ -1269,9 +1284,12 @@ functions = generate_decls_and_callbacks(db, [], api_struct=False, prefix='PyPy') - code = "#include \n" + "\n".join(functions) + code = "#include \n" + if use_micronumpy: + code += "#include " + code += "\n".join(functions) - eci = build_eci(False, export_symbols, code) + eci = build_eci(False, export_symbols, code, use_micronumpy) space.fromcache(State).install_dll(eci) @@ -1283,7 +1301,8 @@ lines = ['PyObject *pypy_static_pyobjs[] = {\n'] include_lines = ['RPY_EXTERN PyObject *pypy_static_pyobjs[];\n'] for name, (typ, expr) in sorted(GLOBALS.items()): - if name.endswith('#'): + if '#' in name: + name = name.split('#')[0] assert typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*') typ, name = typ[:-1], name[:-1] elif name.startswith('PyExc_'): From pypy.commits at gmail.com Tue Apr 19 18:43:57 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 19 Apr 2016 15:43:57 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Add comment about the use of wrap_oserror() in utime() Message-ID: <5716b4ad.e5ecc20a.2b66c.ffffe642@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83779:3ad2cea602b3 Date: 2016-04-19 23:43 +0100 http://bitbucket.org/pypy/pypy/changeset/3ad2cea602b3/ Log: Add comment about the use of wrap_oserror() in utime() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ 
-1406,6 +1406,12 @@ rposix.futimens(path.as_fd, atime_s, atime_ns, mtime_s, mtime_ns) return except OSError as e: + # CPython's Modules/posixmodule.c::posix_utime() has this comment: + # /* Avoid putting the file name into the error here, + # as that may confuse the user into believing that + # something is wrong with the file, when it also + # could be the time stamp that gives a problem. */ + # so we use wrap_oserror() instead of wrap_oserror2() here raise wrap_oserror(space, e) if rposix.HAVE_UTIMENSAT: @@ -1424,6 +1430,7 @@ dir_fd=dir_fd, follow_symlinks=follow_symlinks) return except OSError as e: + # see comment above raise wrap_oserror(space, e) if not follow_symlinks: @@ -1436,6 +1443,7 @@ try: call_rposix(rposix.utime, path, None) except OSError as e: + # see comment above raise wrap_oserror(space, e) try: msg = "utime() arg 2 must be a tuple (atime, mtime) or None" @@ -1451,6 +1459,7 @@ try: call_rposix(rposix.utime, path, (actime, modtime)) except OSError as e: + # see comment above raise wrap_oserror(space, e) From pypy.commits at gmail.com Tue Apr 19 18:48:16 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 19 Apr 2016 15:48:16 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Close branch follow_symlinks Message-ID: <5716b5b0.89cbc20a.2172.ffffe440@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83780:6f95bf3f23be Date: 2016-04-19 23:47 +0100 http://bitbucket.org/pypy/pypy/changeset/6f95bf3f23be/ Log: Close branch follow_symlinks From pypy.commits at gmail.com Tue Apr 19 18:48:37 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 19 Apr 2016 15:48:37 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Merged in follow_symlinks (pull request #428) Message-ID: <5716b5c5.82b71c0a.3578d.3631@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83781:33f8f738c7a4 Date: 2016-04-19 23:47 +0100 http://bitbucket.org/pypy/pypy/changeset/33f8f738c7a4/ Log: Merged in follow_symlinks (pull request #428) Finish implementing 
the 3.3 extensions to existing os functions. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1638,9 +1638,14 @@ return fsdecode(space, w_obj) def fsencode_w(self, w_obj): + from rpython.rlib import rstring if self.isinstance_w(w_obj, self.w_unicode): w_obj = self.fsencode(w_obj) - return self.bytes0_w(w_obj) + result = self.bufferstr_w(w_obj, self.BUF_FULL_RO) + if '\x00' in result: + raise oefmt(self.w_TypeError, + "argument must be a string without NUL characters") + return rstring.assert_str0(result) def fsdecode_w(self, w_obj): if self.isinstance_w(w_obj, self.w_bytes): diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -64,7 +64,7 @@ self.w_obj = w_obj def as_bytes(self): - return self.space.bytes0_w(self.w_obj) + return self.space.fsencode_w(self.w_obj) def as_unicode(self): return self.space.fsdecode_w(self.w_obj) @@ -83,7 +83,7 @@ fname = FileEncoder(space, w_fname) return func(fname, *args) else: - fname = space.bytes0_w(w_fname) + fname = space.fsencode_w(w_fname) return func(fname, *args) return dispatch @@ -112,6 +112,57 @@ return func(fname1, fname2, *args) return dispatch + at specialize.arg(0) +def call_rposix(func, path, *args): + """Call a function that takes a filesystem path as its first argument""" + if path.as_unicode is not None: + return func(path.as_unicode, *args) + else: + path_b = path.as_bytes + assert path_b is not None + return func(path.as_bytes, *args) + + +class Path(object): + _immutable_fields_ = ['as_fd', 'as_bytes', 'as_unicode', 'w_path'] + + def __init__(self, fd, bytes, unicode, w_path): + self.as_fd = fd + self.as_bytes = bytes + self.as_unicode = unicode + self.w_path = w_path + + at specialize.arg(2) +def _unwrap_path(space, w_value, allow_fd=True): + if space.is_none(w_value): + raise 
oefmt(space.w_TypeError, + "can't specify None for path argument") + if _WIN32: + try: + path_u = space.unicode_w(w_value) + return Path(-1, None, path_u, w_value) + except OperationError: + pass + try: + path_b = space.fsencode_w(w_value) + return Path(-1, path_b, None, w_value) + except OperationError: + if allow_fd: + fd = unwrap_fd(space, w_value, "string, bytes or integer") + return Path(fd, None, None, w_value) + raise oefmt(space.w_TypeError, "illegal type for path parameter") + +class _PathOrFd(Unwrapper): + def unwrap(self, space, w_value): + return _unwrap_path(space, w_value, allow_fd=True) + +class _JustPath(Unwrapper): + def unwrap(self, space, w_value): + return _unwrap_path(space, w_value, allow_fd=False) + +def path_or_fd(allow_fd=True): + return _PathOrFd if allow_fd else _JustPath + if hasattr(rposix, 'AT_FDCWD'): DEFAULT_DIR_FD = rposix.AT_FDCWD @@ -119,8 +170,20 @@ DEFAULT_DIR_FD = -100 DIR_FD_AVAILABLE = False -def unwrap_fd(space, w_value): - return space.c_int_w(w_value) + at specialize.arg(2) +def unwrap_fd(space, w_value, allowed_types='integer'): + try: + result = space.c_int_w(w_value) + except OperationError as e: + if not e.match(space, space.w_OverflowError): + raise oefmt(space.w_TypeError, + "argument should be %s, not %T", allowed_types, w_value) + else: + raise + if result == -1: + # -1 is used as sentinel value for not a fd + raise oefmt(space.w_ValueError, "invalid file descriptor: -1") + return result def _unwrap_dirfd(space, w_value): if space.is_none(w_value): @@ -354,8 +417,11 @@ else: return build_stat_result(space, st) - at unwrap_spec(dir_fd=DirFD(available=False), follow_symlinks=kwonly(bool)) -def stat(space, w_path, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): + at unwrap_spec( + path=path_or_fd(allow_fd=True), + dir_fd=DirFD(rposix.HAVE_FSTATAT), + follow_symlinks=kwonly(bool)) +def stat(space, path, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """stat(path, *, dir_fd=None, follow_symlinks=True) -> stat result 
Perform a stat system call on the given path. @@ -371,27 +437,43 @@ link points to. It is an error to use dir_fd or follow_symlinks when specifying path as an open file descriptor.""" + return do_stat(space, "stat", path, dir_fd, follow_symlinks) + + at specialize.arg(1) +def do_stat(space, funcname, path, dir_fd, follow_symlinks): + """Common implementation for stat() and lstat()""" try: - st = dispatch_filename(rposix_stat.stat, 0, - allow_fd_fn=rposix_stat.fstat)(space, w_path) - except OSError, e: - raise wrap_oserror2(space, e, w_path) + if path.as_fd != -1: + if dir_fd != DEFAULT_DIR_FD: + raise oefmt(space.w_ValueError, + "%s: can't specify both dir_fd and fd", funcname) + if not follow_symlinks: + raise oefmt(space.w_ValueError, + "%s: cannot use fd and follow_symlinks together", funcname) + st = rposix_stat.fstat(path.as_fd) + elif follow_symlinks and dir_fd == DEFAULT_DIR_FD: + st = call_rposix(rposix_stat.stat, path) + elif not follow_symlinks and dir_fd == DEFAULT_DIR_FD: + st = call_rposix(rposix_stat.lstat, path) + elif rposix.HAVE_FSTATAT: + st = call_rposix(rposix_stat.fstatat, path, dir_fd, follow_symlinks) + else: + raise oefmt(space.w_NotImplementedError, + "%s: unsupported argument combination", funcname) + except OSError as e: + raise wrap_oserror2(space, e, path.w_path) else: return build_stat_result(space, st) - at unwrap_spec(dir_fd=DirFD(available=False)) -def lstat(space, w_path, dir_fd=DEFAULT_DIR_FD): + at unwrap_spec( + path=path_or_fd(allow_fd=False), + dir_fd=DirFD(rposix.HAVE_FSTATAT)) +def lstat(space, path, dir_fd=DEFAULT_DIR_FD): """lstat(path, *, dir_fd=None) -> stat result Like stat(), but do not follow symbolic links. 
Equivalent to stat(path, follow_symlinks=False).""" - - try: - st = dispatch_filename(rposix_stat.lstat)(space, w_path) - except OSError, e: - raise wrap_oserror2(space, e, w_path) - else: - return build_stat_result(space, st) + return do_stat(space, "lstat", path, dir_fd, False) class StatState(object): def __init__(self, space): @@ -432,7 +514,9 @@ On some platforms, path may also be specified as an open file descriptor. If this functionality is unavailable, using it raises an exception.""" try: - st = dispatch_filename(rposix_stat.statvfs)(space, w_path) + st = dispatch_filename( + rposix_stat.statvfs, + allow_fd_fn=rposix_stat.fstatvfs)(space, w_path) except OSError as e: raise wrap_oserror2(space, e, w_path) else: @@ -1054,8 +1138,10 @@ raise wrap_oserror(space, e) - at unwrap_spec(dir_fd=DirFD(rposix.HAVE_READLINKAT)) -def readlink(space, w_path, dir_fd=DEFAULT_DIR_FD): + at unwrap_spec( + path=path_or_fd(allow_fd=False), + dir_fd=DirFD(rposix.HAVE_READLINKAT)) +def readlink(space, path, dir_fd=DEFAULT_DIR_FD): """readlink(path, *, dir_fd=None) -> path Return a string representing the path to which the symbolic link points. @@ -1064,20 +1150,15 @@ and path should be relative; path will then be relative to that directory. dir_fd may not be implemented on your platform. 
If it is unavailable, using it will raise a NotImplementedError.""" - is_unicode = space.isinstance_w(w_path, space.w_unicode) - if is_unicode: - path = space.fsencode_w(w_path) - else: - path = space.bytes0_w(w_path) try: if dir_fd == DEFAULT_DIR_FD: - result = rposix.readlink(path) + result = call_rposix(rposix.readlink, path) else: - result = rposix.readlinkat(path, dir_fd) - except OSError, e: - raise wrap_oserror2(space, e, w_path) + result = call_rposix(rposix.readlinkat, path, dir_fd) + except OSError as e: + raise wrap_oserror2(space, e, path.w_path) w_result = space.wrapbytes(result) - if is_unicode: + if space.isinstance_w(path.w_path, space.w_unicode): return space.fsdecode(w_result) return w_result @@ -1258,9 +1339,11 @@ return space.wrap(ret) - at unwrap_spec(w_times=WrappedDefault(None), w_ns=kwonly(WrappedDefault(None)), + at unwrap_spec( + path=path_or_fd(allow_fd=rposix.HAVE_FUTIMENS), + w_times=WrappedDefault(None), w_ns=kwonly(WrappedDefault(None)), dir_fd=DirFD(rposix.HAVE_UTIMENSAT), follow_symlinks=kwonly(bool)) -def utime(space, w_path, w_times, w_ns, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): +def utime(space, path, w_times, w_ns, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True) Set the access and modified time of path. 
@@ -1290,47 +1373,11 @@ not space.is_w(w_ns, space.w_None)): raise oefmt(space.w_ValueError, "utime: you may specify either 'times' or 'ns' but not both") - - if rposix.HAVE_UTIMENSAT: - path = space.fsencode_w(w_path) - try: - _utimensat(space, path, w_times, w_ns, dir_fd, follow_symlinks) - return - except OSError, e: - raise wrap_oserror2(space, e, w_path) - - if not follow_symlinks: - raise argument_unavailable(space, "utime", "follow_symlinks") - - if not space.is_w(w_ns, space.w_None): - raise oefmt(space.w_NotImplementedError, - "utime: 'ns' unsupported on this platform on PyPy") - if space.is_w(w_times, space.w_None): - try: - dispatch_filename(rposix.utime, 1)(space, w_path, None) - return - except OSError, e: - raise wrap_oserror2(space, e, w_path) - try: - msg = "utime() arg 2 must be a tuple (atime, mtime) or None" - args_w = space.fixedview(w_times) - if len(args_w) != 2: - raise OperationError(space.w_TypeError, space.wrap(msg)) - actime = space.float_w(args_w[0], allow_conversion=False) - modtime = space.float_w(args_w[1], allow_conversion=False) - dispatch_filename(rposix.utime, 2)(space, w_path, (actime, modtime)) - except OSError, e: - raise wrap_oserror2(space, e, w_path) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise OperationError(space.w_TypeError, space.wrap(msg)) - - -def _utimensat(space, path, w_times, w_ns, dir_fd, follow_symlinks): + utime_now = False if space.is_w(w_times, space.w_None) and space.is_w(w_ns, space.w_None): atime_s = mtime_s = 0 - atime_ns = mtime_ns = rposix.UTIME_NOW + atime_ns = mtime_ns = 0 + utime_now = True elif not space.is_w(w_times, space.w_None): times_w = space.fixedview(w_times) if len(times_w) != 2: @@ -1346,9 +1393,75 @@ atime_s, atime_ns = convert_ns(space, args_w[0]) mtime_s, mtime_ns = convert_ns(space, args_w[1]) - rposix.utimensat( - path, atime_s, atime_ns, mtime_s, mtime_ns, - dir_fd=dir_fd, follow_symlinks=follow_symlinks) + if path.as_fd != -1: + if dir_fd != 
DEFAULT_DIR_FD: + raise oefmt(space.w_ValueError, + "utime: can't specify both dir_fd and fd") + if not follow_symlinks: + raise oefmt(space.w_ValueError, + "utime: cannot use fd and follow_symlinks together") + if utime_now: + atime_ns = mtime_ns = rposix.UTIME_NOW + try: + rposix.futimens(path.as_fd, atime_s, atime_ns, mtime_s, mtime_ns) + return + except OSError as e: + # CPython's Modules/posixmodule.c::posix_utime() has this comment: + # /* Avoid putting the file name into the error here, + # as that may confuse the user into believing that + # something is wrong with the file, when it also + # could be the time stamp that gives a problem. */ + # so we use wrap_oserror() instead of wrap_oserror2() here + raise wrap_oserror(space, e) + + if rposix.HAVE_UTIMENSAT: + path_b = path.as_bytes + if path_b is None: + raise oefmt(space.w_NotImplementedError, + "utime: unsupported value for 'path'") + try: + if utime_now: + rposix.utimensat( + path_b, 0, rposix.UTIME_NOW, 0, rposix.UTIME_NOW, + dir_fd=dir_fd, follow_symlinks=follow_symlinks) + else: + rposix.utimensat( + path_b, atime_s, atime_ns, mtime_s, mtime_ns, + dir_fd=dir_fd, follow_symlinks=follow_symlinks) + return + except OSError as e: + # see comment above + raise wrap_oserror(space, e) + + if not follow_symlinks: + raise argument_unavailable(space, "utime", "follow_symlinks") + + if not space.is_w(w_ns, space.w_None): + raise oefmt(space.w_NotImplementedError, + "utime: 'ns' unsupported on this platform on PyPy") + if utime_now: + try: + call_rposix(rposix.utime, path, None) + except OSError as e: + # see comment above + raise wrap_oserror(space, e) + try: + msg = "utime() arg 2 must be a tuple (atime, mtime) or None" + args_w = space.fixedview(w_times) + if len(args_w) != 2: + raise OperationError(space.w_TypeError, space.wrap(msg)) + actime = space.float_w(args_w[0], allow_conversion=False) + modtime = space.float_w(args_w[1], allow_conversion=False) + except OperationError, e: + if not e.match(space, 
space.w_TypeError): + raise + raise OperationError(space.w_TypeError, space.wrap(msg)) + try: + call_rposix(rposix.utime, path, (actime, modtime)) + except OSError as e: + # see comment above + raise wrap_oserror(space, e) + def convert_seconds(space, w_time): if space.isinstance_w(w_time, space.w_float): @@ -1745,13 +1858,19 @@ raise wrap_oserror(space, e) return space.wrap(res) - at unwrap_spec(path='str0') + at unwrap_spec(path=path_or_fd(allow_fd=hasattr(os, 'fpathconf'))) def pathconf(space, path, w_name): num = confname_w(space, w_name, os.pathconf_names) - try: - res = os.pathconf(path, num) - except OSError, e: - raise wrap_oserror(space, e) + if path.as_fd != -1: + try: + res = os.fpathconf(path.as_fd, num) + except OSError, e: + raise wrap_oserror(space, e) + else: + try: + res = os.pathconf(path.as_bytes, num) + except OSError, e: + raise wrap_oserror2(space, e, path.w_path) return space.wrap(res) def confstr(space, w_name): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -174,6 +174,10 @@ import stat st = self.posix.stat(".") assert stat.S_ISDIR(st.st_mode) + st = self.posix.stat(b".") + assert stat.S_ISDIR(st.st_mode) + st = self.posix.stat(bytearray(b".")) + assert stat.S_ISDIR(st.st_mode) st = self.posix.lstat(".") assert stat.S_ISDIR(st.st_mode) @@ -185,6 +189,12 @@ assert exc.value.errno == errno.ENOENT assert exc.value.filename == "nonexistentdir/nonexistentfile" + excinfo = raises(TypeError, self.posix.stat, None) + assert "can't specify None" in str(excinfo.value) + excinfo = raises(TypeError, self.posix.stat, 2.) 
+ assert "should be string, bytes or integer, not float" in str(excinfo.value) + raises(ValueError, self.posix.stat, -1) + if hasattr(__import__(os.name), "statvfs"): def test_statvfs(self): st = self.posix.statvfs(".") @@ -250,7 +260,7 @@ try: self.posix.utime('qowieuqw/oeiu', arg) except OSError as e: - assert e.filename == 'qowieuqw/oeiu' + pass else: assert 0 From pypy.commits at gmail.com Tue Apr 19 19:28:59 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 19 Apr 2016 16:28:59 -0700 (PDT) Subject: [pypy-commit] pypy py3k: relax some assertions Message-ID: <5716bf3b.c7811c0a.7b51c.3e5e@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83782:375d80f87983 Date: 2016-04-19 14:31 -0700 http://bitbucket.org/pypy/pypy/changeset/375d80f87983/ Log: relax some assertions diff --git a/lib-python/3/test/test_inspect.py b/lib-python/3/test/test_inspect.py --- a/lib-python/3/test/test_inspect.py +++ b/lib-python/3/test/test_inspect.py @@ -1481,12 +1481,13 @@ def test_signature_on_builtin_function(self): with self.assertRaisesRegex(ValueError, 'not supported by signature'): inspect.signature(type) - with self.assertRaisesRegex(ValueError, 'not supported by signature'): - # support for 'wrapper_descriptor' - inspect.signature(type.__call__) - with self.assertRaisesRegex(ValueError, 'not supported by signature'): - # support for 'method-wrapper' - inspect.signature(min.__call__) + if check_impl_detail(pypy=False): + with self.assertRaisesRegex(ValueError, 'not supported by signature'): + # support for 'wrapper_descriptor' + inspect.signature(type.__call__) + with self.assertRaisesRegex(ValueError, 'not supported by signature'): + # support for 'method-wrapper' + inspect.signature(min.__call__) with self.assertRaisesRegex(ValueError, 'no signature found for builtin function'): # support for 'method-wrapper' @@ -1920,7 +1921,7 @@ def test_signature_unhashable(self): def foo(a): pass sig = inspect.signature(foo) - with self.assertRaisesRegex(TypeError, 
'unhashable type'): + with self.assertRaisesRegex(TypeError, 'unhashable'): hash(sig) def test_signature_str(self): @@ -2029,7 +2030,7 @@ p = inspect.Parameter('foo', default=42, kind=inspect.Parameter.KEYWORD_ONLY) - with self.assertRaisesRegex(TypeError, 'unhashable type'): + with self.assertRaisesRegex(TypeError, 'unhashable'): hash(p) def test_signature_parameter_replace(self): @@ -2297,7 +2298,7 @@ def foo(a): pass ba = inspect.signature(foo).bind(1) - with self.assertRaisesRegex(TypeError, 'unhashable type'): + with self.assertRaisesRegex(TypeError, 'unhashable'): hash(ba) def test_signature_bound_arguments_equality(self): From pypy.commits at gmail.com Wed Apr 20 00:16:43 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 19 Apr 2016 21:16:43 -0700 (PDT) Subject: [pypy-commit] pypy default: test, fix to not print mandlebrot to log files Message-ID: <571702ab.c9b0c20a.35adf.2785@mx.google.com> Author: mattip Branch: Changeset: r83783:fa743228fbdb Date: 2016-04-20 07:15 +0300 http://bitbucket.org/pypy/pypy/changeset/fa743228fbdb/ Log: test, fix to not print mandlebrot to log files diff --git a/rpython/tool/ansi_print.py b/rpython/tool/ansi_print.py --- a/rpython/tool/ansi_print.py +++ b/rpython/tool/ansi_print.py @@ -67,6 +67,8 @@ def dot(self): """Output a mandelbrot dot to the terminal.""" + if not isatty(): + return global wrote_dot if not wrote_dot: mandelbrot_driver.reset() diff --git a/rpython/tool/test/test_ansi_print.py b/rpython/tool/test/test_ansi_print.py --- a/rpython/tool/test/test_ansi_print.py +++ b/rpython/tool/test/test_ansi_print.py @@ -65,6 +65,19 @@ assert output[3] == ('[test:WARNING] maybe?\n', (31,)) assert len(output[4][0]) == 1 # single character +def test_no_tty(): + log = ansi_print.AnsiLogger('test') + with FakeOutput(tty=False) as output: + log.dot() + log.dot() + log.WARNING('oops') + log.WARNING('maybe?') + log.dot() + assert len(output) == 2 + assert output[0] == ('[test:WARNING] oops\n', ()) + assert output[1] == 
('[test:WARNING] maybe?\n', ()) + + def test_unknown_method_names(): log = ansi_print.AnsiLogger('test') with FakeOutput() as output: From pypy.commits at gmail.com Wed Apr 20 00:20:47 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 19 Apr 2016 21:20:47 -0700 (PDT) Subject: [pypy-commit] pypy default: add s390x to repackage script Message-ID: <5717039f.03dd1c0a.196ff.708f@mx.google.com> Author: mattip Branch: Changeset: r83784:9d69be296de7 Date: 2016-04-20 07:20 +0300 http://bitbucket.org/pypy/pypy/changeset/9d69be296de7/ Log: add s390x to repackage script diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -9,7 +9,7 @@ # download source, assuming a tag for the release already exists, and repackage them. # The script should be run in an empty directory, i.e. /tmp/release_xxx -for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 +for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 s390x do wget http://buildbot.pypy.org/nightly/$branchname/pypy-c-jit-latest-$plat.tar.bz2 tar -xf pypy-c-jit-latest-$plat.tar.bz2 From pypy.commits at gmail.com Wed Apr 20 00:26:47 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 19 Apr 2016 21:26:47 -0700 (PDT) Subject: [pypy-commit] pypy default: Added tag release-5.1 for changeset 3260adbeba4a Message-ID: <57170507.d3301c0a.9326b.72bd@mx.google.com> Author: mattip Branch: Changeset: r83785:95a1d447cd62 Date: 2016-04-20 07:25 +0300 http://bitbucket.org/pypy/pypy/changeset/95a1d447cd62/ Log: Added tag release-5.1 for changeset 3260adbeba4a diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -20,3 +20,4 @@ 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1 +3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1 From pypy.commits at gmail.com 
Wed Apr 20 02:53:37 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Apr 2016 23:53:37 -0700 (PDT) Subject: [pypy-commit] cffi default: update Message-ID: <57172771.4412c30a.bc642.55fb@mx.google.com> Author: Armin Rigo Branch: Changeset: r2670:91c994324178 Date: 2016-04-20 08:54 +0200 http://bitbucket.org/cffi/cffi/changeset/91c994324178/ Log: update diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -12,10 +12,11 @@ * extern "Python+C" -* in API mode, ``help(lib.foo)`` returns a docstring containing the C - signature now. Note that ``help(lib)`` itself is still useless; I - haven't figured out the hacks needed to convince ``pydoc`` of - showing more. You can use ``dir(lib)`` but it is not most helpful. +* in API mode, ``lib.foo.__doc__`` contains the C signature now. On + CPython you can say ``help(lib.foo)``, but for some reason + ``help(lib)`` (or ``help(lib.foo)`` on PyPy) is still useless; I + haven't yet figured out the hacks needed to convince ``pydoc`` to + show more. (You can use ``dir(lib)`` but it is not most helpful.) v1.5.2 From pypy.commits at gmail.com Wed Apr 20 03:06:05 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 20 Apr 2016 00:06:05 -0700 (PDT) Subject: [pypy-commit] pypy default: PyPyGILState_Ensure() is not fixed in default. It is only fixed in Message-ID: <57172a5d.10981c0a.92fd1.ffffa9b5@mx.google.com> Author: Armin Rigo Branch: Changeset: r83786:2e4ca5042b6b Date: 2016-04-20 09:06 +0200 http://bitbucket.org/pypy/pypy/changeset/2e4ca5042b6b/ Log: PyPyGILState_Ensure() is not fixed in default. 
It is only fixed in cpyext-ext diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -75,6 +75,8 @@ * Fix a corner case in the JIT * Fix edge cases in the cpyext refcounting-compatible semantics + (more work on cpyext compatibility is coming in the ``cpyext-ext`` + branch, but isn't ready yet) * Try harder to not emit NEON instructions on ARM processors without NEON support @@ -99,9 +101,6 @@ * Fix an issue with forkpty() - * Fix issue with GIL and C-API multithreading, we care if PyPyGILState_Ensure - is called before initializing the GIL where cpython allows unsafe behaviour - * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy From pypy.commits at gmail.com Wed Apr 20 03:33:45 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 20 Apr 2016 00:33:45 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the hashes for pypy 5.1.0 Message-ID: <571730d9.46291c0a.dbffa.ffffb214@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r731:bf8c74046191 Date: 2016-04-20 09:27 +0200 http://bitbucket.org/pypy/pypy.org/changeset/bf8c74046191/ Log: update the hashes for pypy 5.1.0 diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -415,17 +415,18 @@ Here are the checksums for each of the downloads -pypy-5.0.1 md5:: +pypy-5.1.0 md5:: - 5544c118d270138125fec1ec5659ef80 pypy-5.0.1-linux-armel.tar.bz2 - 34d6cf783cf585bbfff1b394d2db9a26 pypy-5.0.1-linux-armhf-raring.tar.bz2 - 224546fb5999c4b08b2b1c51e40dc055 pypy-5.0.1-linux-armhf-raspbian.tar.bz2 - 3f05067352f25e23eae618dca96062a2 pypy-5.0.1-linux.tar.bz2 - 6a6b19f6c13b42f4ce9d0c0d892f597a pypy-5.0.1-linux64.tar.bz2 - bec87524ebc3f11c9c9817f64311ef65 pypy-5.0.1-osx64.tar.bz2 - 798c6e83536a5fa5ed7d6efb4d06db1a pypy-5.0.1-src.tar.bz2 - 
928761075bcc2d01f9f884eeee105bd0 pypy-5.0.1-src.zip - 2e53db6766a718084c9327a6059f8ad7 pypy-5.0.1-win32.zip + 17baf9db5200559b9d6c45ec8f60ea48 pypy-5.1.0-linux-armel.tar.bz2 + c0f360b601cd723031c0edc18b62f118 pypy-5.1.0-linux-armhf-raring.tar.bz2 + 27e5e98ccbca5ebb5933147556a46f77 pypy-5.1.0-linux-armhf-raspbian.tar.bz2 + 224d1f124393c96c98b9acbaf4f92078 pypy-5.1.0-linux.tar.bz2 + 2a58aa928ae1cabc6a3309cf98f6182e pypy-5.1.0-linux64.tar.bz2 + 7f546940acb3ceebb5967697a9b05b65 pypy-5.1.0-osx64.tar.bz2 + f9362ffc2946efcaadcc40fdb2c43df7 pypy-5.1.0-s390x.tar.bz2 + d0a76859c83fb0427674273977086cb2 pypy-5.1.0-src.tar.bz2 + 204273a21dbf71c0827966265c40eb7a pypy-5.1.0-src.zip + a1710ae6f15b567bf3c8fd608553ad48 pypy-5.1.0-win32.zip pypy3-2.4.0 md5:: @@ -446,33 +447,34 @@ 009c970b5fa75754ae4c32a5d108a8d4 pypy-1.8-sandbox-linux.tar.bz2 -pypy-5.0.1 sha1:: - - d2df9030c670e178e2ee9b99934174184fe8aa1c pypy-5.0.1-linux-armel.tar.bz2 - 89534b3b09336165bf706a459f170ae3628da891 pypy-5.0.1-linux-armhf-raring.tar.bz2 - ecce668b3ec9d1a5d70e99ea4d0ce7491ca860e5 pypy-5.0.1-linux-armhf-raspbian.tar.bz2 - b814bb1b70b39c1e601a15e8bb809f525d6ef04d pypy-5.0.1-linux.tar.bz2 - 26f6bdada77adb2f79bce97513fdb58a91e6e967 pypy-5.0.1-linux64.tar.bz2 - 54eae1b3da6c29ba4bc5db35b89c23e6080a6d09 pypy-5.0.1-osx64.tar.bz2 - e96dad1562c4a91b26612f0fad0e70d0635399ed pypy-5.0.1-src.tar.bz2 - f7e4cda496244eefc50323704c48c10b568937cf pypy-5.0.1-src.zip - f0addc0cc809e3cc3ffe2c2dd643eb6e1c95cb49 pypy-5.0.1-win32.zip +pypy-5.1.0 sha1:: -pypy-5.0.1 sha256:: + 114d4f981956b83cfbc0a3c819fdac0b0550cd82 pypy-5.1.0-linux-armel.tar.bz2 + e3060f8fa765c317ec1ad6923f9ea595b9d411c3 pypy-5.1.0-linux-armhf-raring.tar.bz2 + 8943448afd1fd3e89be0575f69c6f3be69f2efbc pypy-5.1.0-linux-armhf-raspbian.tar.bz2 + 229e7dbc130d2cc92be9d1cde88f2d6f7f28621b pypy-5.1.0-linux.tar.bz2 + c959524ce180f801bdbcbee4ca038309e1c771dd pypy-5.1.0-linux64.tar.bz2 + 216a52e44f3642176cf05fc3b4c6e2cf8981e400 pypy-5.1.0-osx64.tar.bz2 + 
b696059359a780ad3c2641b14c989021d93015e8 pypy-5.1.0-s390x.tar.bz2 + c9c497836e6235af9fee2a98e4aeaa2bc3a29550 pypy-5.1.0-src.tar.bz2 + a184ef5ada93d53e8dc4a9850a9ed764bd661d7b pypy-5.1.0-src.zip + 4daba0932afcc4755d93d55aa3cbdd851da9198d pypy-5.1.0-win32.zip - 17d55804b2253acd9de42276d756d4a08b7d1d2da09ef81dd325e14b18a1bcda pypy-5.0.1-linux-armel.tar.bz2 - 1e9146978cc7e7bd30683a518f304a824db7b9b1c6fae5e866eb703684ba3c98 pypy-5.0.1-linux-armhf-raring.tar.bz2 - 338d1c32c1326e6321b222ae357711b38c4a0ffddf020c2a35536b5f69376e28 pypy-5.0.1-linux-armhf-raspbian.tar.bz2 - 4b9a294033f917a1674c9ddcb2e7e8d32c4f4351f8216fd1fe23f6d2ad2b1a36 pypy-5.0.1-linux.tar.bz2 - 1b1363a48edd1c1b31ca5e995987eda3d460a3404f36c3bb2dd9f52c93eecff5 pypy-5.0.1-linux64.tar.bz2 - 6ebdb9d91203f053b38e3c21841c11a72f416dc185f7b3b7c908229df15e924a pypy-5.0.1-osx64.tar.bz2 - 1573c9284d3ec236c8e6ef3b954753932dff29462c54b5885b761d1ee68b6e05 pypy-5.0.1-src.tar.bz2 - 6e343f24c5e4ea87879bc4fd299b65a2825796286319edc0b69b3681017c145f pypy-5.0.1-src.zip - c12254d8b1747322736d26e014744a426c6900d232c1799140fbb43f44319730 pypy-5.0.1-win32.zip +pypy-5.1.0 sha256:: + + ea7017449ff0630431866423220c3688fc55c1a0b80a96af0ae138dd0751b81c pypy-5.1.0-linux-armel.tar.bz2 + a3e13083591bccc301fb974ff0a6c7e4ab4e611e4b31c0932898b981c794462b pypy-5.1.0-linux-armhf-raring.tar.bz2 + 3bfcd251b4f3fd1a09520b2741c647c364d16d50c82b813732a78ac60ccb2b69 pypy-5.1.0-linux-armhf-raspbian.tar.bz2 + 2f6c521b5b3c1082eab58be78655aa01ec400d19baeec93c455864a7483b8744 pypy-5.1.0-linux.tar.bz2 + 0e8913351d043a50740b98cb89d99852b8bd6d11225a41c8abfc0baf7084cbf6 pypy-5.1.0-linux64.tar.bz2 + 7e270c66347158dd794c101c4817f742f760ed805aa0d10abe19ba4a78a75118 pypy-5.1.0-osx64.tar.bz2 + 096827f2cb041f9decc5a2b0b8fc6b5fe0748f229b0419fd73982e0714a292cd pypy-5.1.0-s390x.tar.bz2 + 16bab9501e942c0704abbf9cd6c4e950c6a76dc226cf1e447ea084916aef4714 pypy-5.1.0-src.tar.bz2 + afc1c72651c90418b57692a5628481dd09a3d3172765fd206e8bcdac7b1bf02d pypy-5.1.0-src.zip + 
044e7f35223a443412b5948740e60e93069a6f8b0a72053cc9d472874bb1b6cc pypy-5.1.0-win32.zip 3373b1d51fc610b962e0b535087073f2cc921ab0269ba2896b140ab4a56588fd pypy-5.0.1++-ppc64.tar.bz2 53d742504a78366b833c04bd83740336aa4ddfecffeff6b2fa8728fcd6b4c8af pypy-5.0.1+-ppc64le.tar.bz2 - pypy3-2.4.0 sha1:: 7d715742f6929351b310a2ca3b924cab35913089 pypy3-2.4.0-linux64.tar.bz2 From pypy.commits at gmail.com Wed Apr 20 05:30:46 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 20 Apr 2016 02:30:46 -0700 (PDT) Subject: [pypy-commit] pypy default: typo in release notes Message-ID: <57174c46.10921c0a.54913.ffffe9b4@mx.google.com> Author: Richard Plangger Branch: Changeset: r83787:1bb1fd1183fb Date: 2016-04-20 11:29 +0200 http://bitbucket.org/pypy/pypy/changeset/1bb1fd1183fb/ Log: typo in release notes diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -47,7 +47,7 @@ * big- and little-endian variants of **PPC64** running Linux, - * **s960x** running Linux + * **s390x** running Linux .. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. 
_`dynamic languages`: http://pypyjs.org From pypy.commits at gmail.com Wed Apr 20 09:57:48 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 20 Apr 2016 06:57:48 -0700 (PDT) Subject: [pypy-commit] pypy stat_ns: hg merge py3k Message-ID: <57178adc.2413c30a.46a1c.083a@mx.google.com> Author: Ronan Lamy Branch: stat_ns Changeset: r83788:8dfa459eefd8 Date: 2016-04-20 14:56 +0100 http://bitbucket.org/pypy/pypy/changeset/8dfa459eefd8/ Log: hg merge py3k diff --git a/lib-python/3/test/test_inspect.py b/lib-python/3/test/test_inspect.py --- a/lib-python/3/test/test_inspect.py +++ b/lib-python/3/test/test_inspect.py @@ -1481,12 +1481,13 @@ def test_signature_on_builtin_function(self): with self.assertRaisesRegex(ValueError, 'not supported by signature'): inspect.signature(type) - with self.assertRaisesRegex(ValueError, 'not supported by signature'): - # support for 'wrapper_descriptor' - inspect.signature(type.__call__) - with self.assertRaisesRegex(ValueError, 'not supported by signature'): - # support for 'method-wrapper' - inspect.signature(min.__call__) + if check_impl_detail(pypy=False): + with self.assertRaisesRegex(ValueError, 'not supported by signature'): + # support for 'wrapper_descriptor' + inspect.signature(type.__call__) + with self.assertRaisesRegex(ValueError, 'not supported by signature'): + # support for 'method-wrapper' + inspect.signature(min.__call__) with self.assertRaisesRegex(ValueError, 'no signature found for builtin function'): # support for 'method-wrapper' @@ -1920,7 +1921,7 @@ def test_signature_unhashable(self): def foo(a): pass sig = inspect.signature(foo) - with self.assertRaisesRegex(TypeError, 'unhashable type'): + with self.assertRaisesRegex(TypeError, 'unhashable'): hash(sig) def test_signature_str(self): @@ -2029,7 +2030,7 @@ p = inspect.Parameter('foo', default=42, kind=inspect.Parameter.KEYWORD_ONLY) - with self.assertRaisesRegex(TypeError, 'unhashable type'): + with self.assertRaisesRegex(TypeError, 'unhashable'): hash(p) def 
test_signature_parameter_replace(self): @@ -2297,7 +2298,7 @@ def foo(a): pass ba = inspect.signature(foo).bind(1) - with self.assertRaisesRegex(TypeError, 'unhashable type'): + with self.assertRaisesRegex(TypeError, 'unhashable'): hash(ba) def test_signature_bound_arguments_equality(self): diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -398,7 +398,7 @@ self.check_signal_action = None # changed by the signal module self.user_del_action = UserDelAction(self) self._code_of_sys_exc_info = None - + # can be overridden to a subclass self.initialize() @@ -582,6 +582,9 @@ # lives in pypy/module/exceptions, we rename it below for # sys.builtin_module_names bootstrap_modules = set(('sys', 'imp', 'builtins', 'exceptions')) + if sys.platform.startswith("win"): + self.setbuiltinmodule('_winreg') + bootstrap_modules.add('winreg') installed_builtin_modules = list(bootstrap_modules) exception_types_w = self.export_builtin_exceptions() @@ -1534,7 +1537,7 @@ """ if w_obj is unicode, call identifier_w() (i.e., return the UTF-8 encoded string). Else, call bytes_w(). - + Maybe we should kill str_w completely and manually substitute it with identifier_w/bytes_w at all call sites? 
""" @@ -1635,9 +1638,14 @@ return fsdecode(space, w_obj) def fsencode_w(self, w_obj): + from rpython.rlib import rstring if self.isinstance_w(w_obj, self.w_unicode): w_obj = self.fsencode(w_obj) - return self.bytes0_w(w_obj) + result = self.bufferstr_w(w_obj, self.BUF_FULL_RO) + if '\x00' in result: + raise oefmt(self.w_TypeError, + "argument must be a string without NUL characters") + return rstring.assert_str0(result) def fsdecode_w(self, w_obj): if self.isinstance_w(w_obj, self.w_bytes): diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -64,7 +64,7 @@ self.w_obj = w_obj def as_bytes(self): - return self.space.bytes0_w(self.w_obj) + return self.space.fsencode_w(self.w_obj) def as_unicode(self): return self.space.fsdecode_w(self.w_obj) @@ -83,7 +83,7 @@ fname = FileEncoder(space, w_fname) return func(fname, *args) else: - fname = space.bytes0_w(w_fname) + fname = space.fsencode_w(w_fname) return func(fname, *args) return dispatch @@ -112,6 +112,57 @@ return func(fname1, fname2, *args) return dispatch + at specialize.arg(0) +def call_rposix(func, path, *args): + """Call a function that takes a filesystem path as its first argument""" + if path.as_unicode is not None: + return func(path.as_unicode, *args) + else: + path_b = path.as_bytes + assert path_b is not None + return func(path.as_bytes, *args) + + +class Path(object): + _immutable_fields_ = ['as_fd', 'as_bytes', 'as_unicode', 'w_path'] + + def __init__(self, fd, bytes, unicode, w_path): + self.as_fd = fd + self.as_bytes = bytes + self.as_unicode = unicode + self.w_path = w_path + + at specialize.arg(2) +def _unwrap_path(space, w_value, allow_fd=True): + if space.is_none(w_value): + raise oefmt(space.w_TypeError, + "can't specify None for path argument") + if _WIN32: + try: + path_u = space.unicode_w(w_value) + return Path(-1, None, path_u, w_value) + except OperationError: + pass + try: + path_b 
= space.fsencode_w(w_value) + return Path(-1, path_b, None, w_value) + except OperationError: + if allow_fd: + fd = unwrap_fd(space, w_value, "string, bytes or integer") + return Path(fd, None, None, w_value) + raise oefmt(space.w_TypeError, "illegal type for path parameter") + +class _PathOrFd(Unwrapper): + def unwrap(self, space, w_value): + return _unwrap_path(space, w_value, allow_fd=True) + +class _JustPath(Unwrapper): + def unwrap(self, space, w_value): + return _unwrap_path(space, w_value, allow_fd=False) + +def path_or_fd(allow_fd=True): + return _PathOrFd if allow_fd else _JustPath + if hasattr(rposix, 'AT_FDCWD'): DEFAULT_DIR_FD = rposix.AT_FDCWD @@ -119,8 +170,20 @@ DEFAULT_DIR_FD = -100 DIR_FD_AVAILABLE = False -def unwrap_fd(space, w_value): - return space.c_int_w(w_value) + at specialize.arg(2) +def unwrap_fd(space, w_value, allowed_types='integer'): + try: + result = space.c_int_w(w_value) + except OperationError as e: + if not e.match(space, space.w_OverflowError): + raise oefmt(space.w_TypeError, + "argument should be %s, not %T", allowed_types, w_value) + else: + raise + if result == -1: + # -1 is used as sentinel value for not a fd + raise oefmt(space.w_ValueError, "invalid file descriptor: -1") + return result def _unwrap_dirfd(space, w_value): if space.is_none(w_value): @@ -366,8 +429,11 @@ else: return build_stat_result(space, st) - at unwrap_spec(dir_fd=DirFD(available=False), follow_symlinks=kwonly(bool)) -def stat(space, w_path, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): + at unwrap_spec( + path=path_or_fd(allow_fd=True), + dir_fd=DirFD(rposix.HAVE_FSTATAT), + follow_symlinks=kwonly(bool)) +def stat(space, path, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """stat(path, *, dir_fd=None, follow_symlinks=True) -> stat result Perform a stat system call on the given path. @@ -383,27 +449,43 @@ link points to. 
It is an error to use dir_fd or follow_symlinks when specifying path as an open file descriptor.""" + return do_stat(space, "stat", path, dir_fd, follow_symlinks) + + at specialize.arg(1) +def do_stat(space, funcname, path, dir_fd, follow_symlinks): + """Common implementation for stat() and lstat()""" try: - st = dispatch_filename(rposix_stat.stat, 0, - allow_fd_fn=rposix_stat.fstat)(space, w_path) - except OSError, e: - raise wrap_oserror2(space, e, w_path) + if path.as_fd != -1: + if dir_fd != DEFAULT_DIR_FD: + raise oefmt(space.w_ValueError, + "%s: can't specify both dir_fd and fd", funcname) + if not follow_symlinks: + raise oefmt(space.w_ValueError, + "%s: cannot use fd and follow_symlinks together", funcname) + st = rposix_stat.fstat(path.as_fd) + elif follow_symlinks and dir_fd == DEFAULT_DIR_FD: + st = call_rposix(rposix_stat.stat, path) + elif not follow_symlinks and dir_fd == DEFAULT_DIR_FD: + st = call_rposix(rposix_stat.lstat, path) + elif rposix.HAVE_FSTATAT: + st = call_rposix(rposix_stat.fstatat, path, dir_fd, follow_symlinks) + else: + raise oefmt(space.w_NotImplementedError, + "%s: unsupported argument combination", funcname) + except OSError as e: + raise wrap_oserror2(space, e, path.w_path) else: return build_stat_result(space, st) - at unwrap_spec(dir_fd=DirFD(available=False)) -def lstat(space, w_path, dir_fd=DEFAULT_DIR_FD): + at unwrap_spec( + path=path_or_fd(allow_fd=False), + dir_fd=DirFD(rposix.HAVE_FSTATAT)) +def lstat(space, path, dir_fd=DEFAULT_DIR_FD): """lstat(path, *, dir_fd=None) -> stat result Like stat(), but do not follow symbolic links. 
Equivalent to stat(path, follow_symlinks=False).""" - - try: - st = dispatch_filename(rposix_stat.lstat)(space, w_path) - except OSError, e: - raise wrap_oserror2(space, e, w_path) - else: - return build_stat_result(space, st) + return do_stat(space, "lstat", path, dir_fd, False) class StatState(object): def __init__(self, space): @@ -444,7 +526,9 @@ On some platforms, path may also be specified as an open file descriptor. If this functionality is unavailable, using it raises an exception.""" try: - st = dispatch_filename(rposix_stat.statvfs)(space, w_path) + st = dispatch_filename( + rposix_stat.statvfs, + allow_fd_fn=rposix_stat.fstatvfs)(space, w_path) except OSError as e: raise wrap_oserror2(space, e, w_path) else: @@ -1066,8 +1150,10 @@ raise wrap_oserror(space, e) - at unwrap_spec(dir_fd=DirFD(rposix.HAVE_READLINKAT)) -def readlink(space, w_path, dir_fd=DEFAULT_DIR_FD): + at unwrap_spec( + path=path_or_fd(allow_fd=False), + dir_fd=DirFD(rposix.HAVE_READLINKAT)) +def readlink(space, path, dir_fd=DEFAULT_DIR_FD): """readlink(path, *, dir_fd=None) -> path Return a string representing the path to which the symbolic link points. @@ -1076,20 +1162,15 @@ and path should be relative; path will then be relative to that directory. dir_fd may not be implemented on your platform. 
If it is unavailable, using it will raise a NotImplementedError.""" - is_unicode = space.isinstance_w(w_path, space.w_unicode) - if is_unicode: - path = space.fsencode_w(w_path) - else: - path = space.bytes0_w(w_path) try: if dir_fd == DEFAULT_DIR_FD: - result = rposix.readlink(path) + result = call_rposix(rposix.readlink, path) else: - result = rposix.readlinkat(path, dir_fd) - except OSError, e: - raise wrap_oserror2(space, e, w_path) + result = call_rposix(rposix.readlinkat, path, dir_fd) + except OSError as e: + raise wrap_oserror2(space, e, path.w_path) w_result = space.wrapbytes(result) - if is_unicode: + if space.isinstance_w(path.w_path, space.w_unicode): return space.fsdecode(w_result) return w_result @@ -1270,9 +1351,11 @@ return space.wrap(ret) - at unwrap_spec(w_times=WrappedDefault(None), w_ns=kwonly(WrappedDefault(None)), + at unwrap_spec( + path=path_or_fd(allow_fd=rposix.HAVE_FUTIMENS), + w_times=WrappedDefault(None), w_ns=kwonly(WrappedDefault(None)), dir_fd=DirFD(rposix.HAVE_UTIMENSAT), follow_symlinks=kwonly(bool)) -def utime(space, w_path, w_times, w_ns, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): +def utime(space, path, w_times, w_ns, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True) Set the access and modified time of path. 
@@ -1302,47 +1385,11 @@ not space.is_w(w_ns, space.w_None)): raise oefmt(space.w_ValueError, "utime: you may specify either 'times' or 'ns' but not both") - - if rposix.HAVE_UTIMENSAT: - path = space.fsencode_w(w_path) - try: - _utimensat(space, path, w_times, w_ns, dir_fd, follow_symlinks) - return - except OSError, e: - raise wrap_oserror2(space, e, w_path) - - if not follow_symlinks: - raise argument_unavailable(space, "utime", "follow_symlinks") - - if not space.is_w(w_ns, space.w_None): - raise oefmt(space.w_NotImplementedError, - "utime: 'ns' unsupported on this platform on PyPy") - if space.is_w(w_times, space.w_None): - try: - dispatch_filename(rposix.utime, 1)(space, w_path, None) - return - except OSError, e: - raise wrap_oserror2(space, e, w_path) - try: - msg = "utime() arg 2 must be a tuple (atime, mtime) or None" - args_w = space.fixedview(w_times) - if len(args_w) != 2: - raise OperationError(space.w_TypeError, space.wrap(msg)) - actime = space.float_w(args_w[0], allow_conversion=False) - modtime = space.float_w(args_w[1], allow_conversion=False) - dispatch_filename(rposix.utime, 2)(space, w_path, (actime, modtime)) - except OSError, e: - raise wrap_oserror2(space, e, w_path) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - raise OperationError(space.w_TypeError, space.wrap(msg)) - - -def _utimensat(space, path, w_times, w_ns, dir_fd, follow_symlinks): + utime_now = False if space.is_w(w_times, space.w_None) and space.is_w(w_ns, space.w_None): atime_s = mtime_s = 0 - atime_ns = mtime_ns = rposix.UTIME_NOW + atime_ns = mtime_ns = 0 + utime_now = True elif not space.is_w(w_times, space.w_None): times_w = space.fixedview(w_times) if len(times_w) != 2: @@ -1358,9 +1405,75 @@ atime_s, atime_ns = convert_ns(space, args_w[0]) mtime_s, mtime_ns = convert_ns(space, args_w[1]) - rposix.utimensat( - path, atime_s, atime_ns, mtime_s, mtime_ns, - dir_fd=dir_fd, follow_symlinks=follow_symlinks) + if path.as_fd != -1: + if dir_fd != 
DEFAULT_DIR_FD: + raise oefmt(space.w_ValueError, + "utime: can't specify both dir_fd and fd") + if not follow_symlinks: + raise oefmt(space.w_ValueError, + "utime: cannot use fd and follow_symlinks together") + if utime_now: + atime_ns = mtime_ns = rposix.UTIME_NOW + try: + rposix.futimens(path.as_fd, atime_s, atime_ns, mtime_s, mtime_ns) + return + except OSError as e: + # CPython's Modules/posixmodule.c::posix_utime() has this comment: + # /* Avoid putting the file name into the error here, + # as that may confuse the user into believing that + # something is wrong with the file, when it also + # could be the time stamp that gives a problem. */ + # so we use wrap_oserror() instead of wrap_oserror2() here + raise wrap_oserror(space, e) + + if rposix.HAVE_UTIMENSAT: + path_b = path.as_bytes + if path_b is None: + raise oefmt(space.w_NotImplementedError, + "utime: unsupported value for 'path'") + try: + if utime_now: + rposix.utimensat( + path_b, 0, rposix.UTIME_NOW, 0, rposix.UTIME_NOW, + dir_fd=dir_fd, follow_symlinks=follow_symlinks) + else: + rposix.utimensat( + path_b, atime_s, atime_ns, mtime_s, mtime_ns, + dir_fd=dir_fd, follow_symlinks=follow_symlinks) + return + except OSError as e: + # see comment above + raise wrap_oserror(space, e) + + if not follow_symlinks: + raise argument_unavailable(space, "utime", "follow_symlinks") + + if not space.is_w(w_ns, space.w_None): + raise oefmt(space.w_NotImplementedError, + "utime: 'ns' unsupported on this platform on PyPy") + if utime_now: + try: + call_rposix(rposix.utime, path, None) + except OSError as e: + # see comment above + raise wrap_oserror(space, e) + try: + msg = "utime() arg 2 must be a tuple (atime, mtime) or None" + args_w = space.fixedview(w_times) + if len(args_w) != 2: + raise OperationError(space.w_TypeError, space.wrap(msg)) + actime = space.float_w(args_w[0], allow_conversion=False) + modtime = space.float_w(args_w[1], allow_conversion=False) + except OperationError, e: + if not e.match(space, 
space.w_TypeError): + raise + raise OperationError(space.w_TypeError, space.wrap(msg)) + try: + call_rposix(rposix.utime, path, (actime, modtime)) + except OSError as e: + # see comment above + raise wrap_oserror(space, e) + def convert_seconds(space, w_time): if space.isinstance_w(w_time, space.w_float): @@ -1757,13 +1870,19 @@ raise wrap_oserror(space, e) return space.wrap(res) - at unwrap_spec(path='str0') + at unwrap_spec(path=path_or_fd(allow_fd=hasattr(os, 'fpathconf'))) def pathconf(space, path, w_name): num = confname_w(space, w_name, os.pathconf_names) - try: - res = os.pathconf(path, num) - except OSError, e: - raise wrap_oserror(space, e) + if path.as_fd != -1: + try: + res = os.fpathconf(path.as_fd, num) + except OSError, e: + raise wrap_oserror(space, e) + else: + try: + res = os.pathconf(path.as_bytes, num) + except OSError, e: + raise wrap_oserror2(space, e, path.w_path) return space.wrap(res) def confstr(space, w_name): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -174,6 +174,10 @@ import stat st = self.posix.stat(".") assert stat.S_ISDIR(st.st_mode) + st = self.posix.stat(b".") + assert stat.S_ISDIR(st.st_mode) + st = self.posix.stat(bytearray(b".")) + assert stat.S_ISDIR(st.st_mode) st = self.posix.lstat(".") assert stat.S_ISDIR(st.st_mode) @@ -185,6 +189,12 @@ assert exc.value.errno == errno.ENOENT assert exc.value.filename == "nonexistentdir/nonexistentfile" + excinfo = raises(TypeError, self.posix.stat, None) + assert "can't specify None" in str(excinfo.value) + excinfo = raises(TypeError, self.posix.stat, 2.) 
+ assert "should be string, bytes or integer, not float" in str(excinfo.value) + raises(ValueError, self.posix.stat, -1) + if hasattr(__import__(os.name), "statvfs"): def test_statvfs(self): st = self.posix.statvfs(".") @@ -250,7 +260,7 @@ try: self.posix.utime('qowieuqw/oeiu', arg) except OSError as e: - assert e.filename == 'qowieuqw/oeiu' + pass else: assert 0 From pypy.commits at gmail.com Wed Apr 20 12:31:23 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 20 Apr 2016 09:31:23 -0700 (PDT) Subject: [pypy-commit] pypy stat_ns: Close branch stat_ns Message-ID: <5717aedb.c9921c0a.965f.ffff9e1c@mx.google.com> Author: Ronan Lamy Branch: stat_ns Changeset: r83789:10b716c07594 Date: 2016-04-20 17:30 +0100 http://bitbucket.org/pypy/pypy/changeset/10b716c07594/ Log: Close branch stat_ns From pypy.commits at gmail.com Wed Apr 20 12:32:14 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 20 Apr 2016 09:32:14 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Merged in stat_ns (pull request #430) Message-ID: <5717af0e.08851c0a.a74e8.ffffa0ce@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83790:d98d51929afd Date: 2016-04-20 17:30 +0100 http://bitbucket.org/pypy/pypy/changeset/d98d51929afd/ Log: Merged in stat_ns (pull request #430) Hackish support for the st_xtime_ns fields in stat_result() diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -36,10 +36,12 @@ # further fields, not accessible by index (the numbers are still needed # but not visible because they are no longer consecutive) - - st_atime = structseqfield(15, "time of last access") - st_mtime = structseqfield(16, "time of last modification") - st_ctime = structseqfield(17, "time of last status change") + st_atime = structseqfield(11, "time of last access") + st_mtime = structseqfield(12, "time of last modification") + st_ctime = structseqfield(13, "time of last change") + st_atime_ns = 
structseqfield(14, "time of last access in nanoseconds") + st_mtime_ns = structseqfield(15, "time of last modification in nanoseconds") + st_ctime_ns = structseqfield(16, "time of last change in nanoseconds") if "st_blksize" in posix._statfields: st_blksize = structseqfield(20, "blocksize for filesystem I/O") diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -357,44 +357,56 @@ STAT_FIELDS = unrolling_iterable(enumerate(rposix_stat.STAT_FIELDS)) -STATVFS_FIELDS = unrolling_iterable(enumerate(rposix_stat.STATVFS_FIELDS)) +N_INDEXABLE_FIELDS = 10 + +def _time_ns_from_float(ftime): + "Convert a floating-point time (in seconds) into a (s, ns) pair of ints" + fracpart, intpart = modf(ftime) + if fracpart < 0: + fracpart += 1. + intpart -= 1. + return int(intpart), int(fracpart * 1e9) + + at specialize.arg(4) +def _fill_time(space, lst, index, w_keywords, attrname, ftime): + stat_float_times = space.fromcache(StatState).stat_float_times + seconds, fractional_ns = _time_ns_from_float(ftime) + lst[index] = space.wrap(seconds) + if stat_float_times: + space.setitem(w_keywords, space.wrap(attrname), space.wrap(ftime)) + else: + space.setitem(w_keywords, space.wrap(attrname), space.wrap(seconds)) + w_billion = space.wrap(1000000000) + w_total_ns = space.add(space.mul(space.wrap(seconds), w_billion), + space.wrap(fractional_ns)) + space.setitem(w_keywords, space.wrap(attrname + '_ns'), w_total_ns) + +STANDARD_FIELDS = unrolling_iterable(enumerate(rposix_stat.STAT_FIELDS[:7])) +EXTRA_FIELDS = unrolling_iterable(rposix_stat.STAT_FIELDS[10:]) def build_stat_result(space, st): - FIELDS = STAT_FIELDS # also when not translating at all - lst = [None] * rposix_stat.N_INDEXABLE_FIELDS + lst = [None] * N_INDEXABLE_FIELDS w_keywords = space.newdict() - stat_float_times = space.fromcache(StatState).stat_float_times - for i, (name, TYPE) in FIELDS: + for (i, (name, TYPE)) in 
STANDARD_FIELDS: value = getattr(st, name) - if name in ('st_atime', 'st_mtime', 'st_ctime'): - value = int(value) # rounded to an integer for indexed access w_value = space.wrap(value) - if i < rposix_stat.N_INDEXABLE_FIELDS: - lst[i] = w_value - else: - space.setitem(w_keywords, space.wrap(name), w_value) + lst[i] = w_value - # non-rounded values for name-based access - if stat_float_times: - space.setitem(w_keywords, - space.wrap('st_atime'), space.wrap(st.st_atime)) - space.setitem(w_keywords, - space.wrap('st_mtime'), space.wrap(st.st_mtime)) - space.setitem(w_keywords, - space.wrap('st_ctime'), space.wrap(st.st_ctime)) - else: - space.setitem(w_keywords, - space.wrap('st_atime'), space.wrap(int(st.st_atime))) - space.setitem(w_keywords, - space.wrap('st_mtime'), space.wrap(int(st.st_mtime))) - space.setitem(w_keywords, - space.wrap('st_ctime'), space.wrap(int(st.st_ctime))) + _fill_time(space, lst, 7, w_keywords, 'st_atime', st.st_atime) + _fill_time(space, lst, 8, w_keywords, 'st_mtime', st.st_mtime) + _fill_time(space, lst, 9, w_keywords, 'st_ctime', st.st_ctime) + + for name, TYPE in EXTRA_FIELDS: + value = getattr(st, name) + w_value = space.wrap(value) + space.setitem(w_keywords, space.wrap(name), w_value) w_tuple = space.newtuple(lst) w_stat_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('stat_result')) return space.call_function(w_stat_result, w_tuple, w_keywords) +STATVFS_FIELDS = unrolling_iterable(enumerate(rposix_stat.STATVFS_FIELDS)) def build_statvfs_result(space, st): vals_w = [None] * len(rposix_stat.STATVFS_FIELDS) From pypy.commits at gmail.com Wed Apr 20 14:08:47 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 20 Apr 2016 11:08:47 -0700 (PDT) Subject: [pypy-commit] pypy default: expand header of release note Message-ID: <5717c5af.47afc20a.a1dc2.6ed9@mx.google.com> Author: mattip Branch: Changeset: r83791:ede770bcad43 Date: 2016-04-20 21:07 +0300 http://bitbucket.org/pypy/pypy/changeset/ede770bcad43/ Log: expand 
header of release note diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -3,10 +3,17 @@ ======== We have released PyPy 5.1, about a month after PyPy 5.0. -We encourage all users of PyPy to update to this version. Apart from the usual -bug fixes, there is an ongoing effort to improve the warmup time and memory -usage of JIT-related metadata, and we now fully support the IBM s390x -architecture. We also updated cffi_ to 1.6 + +This release includes more improvement to warmup time and memory +requirements. We have seen about a 20% memory requirement reduction and up to +30% warmup time improvement, more detail in the `blog post`_. + +We also now have `fully support for the IBM s390x`_. Since this support is in +`RPython`_, any dynamic language written using RPython, like PyPy, will +automagically be supported on that architecture. + +We updated cffi_ to 1.6, and continue to improve support for the wider +python ecosystem using the PyPy interpreter. You can download the PyPy 5.1 release here: @@ -27,6 +34,8 @@ .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html .. _`numpy`: https://bitbucket.org/pypy/numpy .. _cffi: https://cffi.readthedocs.org +.. _`fully support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html +.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html What is PyPy? ============= @@ -105,7 +114,7 @@ our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy -* Numpy: +* Numpy_: * Implemented numpy.where for a single argument @@ -141,7 +150,7 @@ * Update rpython functions with ones needed for py3k .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html -.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html +.. _Numpy: https://bitbucket.org/pypy/numpy Please update, and continue to help us make PyPy better. 
From pypy.commits at gmail.com Wed Apr 20 14:49:36 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 20 Apr 2016 11:49:36 -0700 (PDT) Subject: [pypy-commit] pypy default: Stop early if $branchname or $tagname are invalid Message-ID: <5717cf40.021b1c0a.ee4ee.ffffd03f@mx.google.com> Author: Armin Rigo Branch: Changeset: r83792:b3a73ac614ea Date: 2016-04-20 09:11 +0200 http://bitbucket.org/pypy/pypy/changeset/b3a73ac614ea/ Log: Stop early if $branchname or $tagname are invalid diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -3,7 +3,11 @@ min=1 rev=0 branchname=release-$maj.x # ==OR== release-$maj.$min.x -tagname=release-$maj.$min.$rev +tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev + +hg log -r $branchname || exit 1 +hg log -r $tagname || exit 1 + # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. 
From pypy.commits at gmail.com Wed Apr 20 14:49:38 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 20 Apr 2016 11:49:38 -0700 (PDT) Subject: [pypy-commit] pypy default: Issue #2277: only special-case two exact lists, not list subclasses, Message-ID: <5717cf42.d3161c0a.f9c08.ffffd4e8@mx.google.com> Author: Armin Rigo Branch: Changeset: r83793:c6fad9d028ef Date: 2016-04-20 20:49 +0200 http://bitbucket.org/pypy/pypy/changeset/c6fad9d028ef/ Log: Issue #2277: only special-case two exact lists, not list subclasses, because an overridden __iter__() should be called (probably) diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -180,10 +180,9 @@ def specialized_zip_2_lists(space, w_list1, w_list2): from pypy.objspace.std.listobject import W_ListObject - if (not isinstance(w_list1, W_ListObject) or - not isinstance(w_list2, W_ListObject)): + if type(w_list1) is not W_ListObject or type(w_list2) is not W_ListObject: raise OperationError(space.w_TypeError, - space.wrap("expected two lists")) + space.wrap("expected two exact lists")) if space.config.objspace.std.withspecialisedtuple: intlist1 = w_list1.getitems_int() From pypy.commits at gmail.com Wed Apr 20 14:49:40 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 20 Apr 2016 11:49:40 -0700 (PDT) Subject: [pypy-commit] pypy default: merge heads Message-ID: <5717cf44.022ec20a.a5593.7d91@mx.google.com> Author: Armin Rigo Branch: Changeset: r83794:27011fa25b9e Date: 2016-04-20 20:49 +0200 http://bitbucket.org/pypy/pypy/changeset/27011fa25b9e/ Log: merge heads diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -3,10 +3,17 @@ ======== We have released PyPy 5.1, about a month after PyPy 5.0. -We encourage all users of PyPy to update to this version. 
Apart from the usual -bug fixes, there is an ongoing effort to improve the warmup time and memory -usage of JIT-related metadata, and we now fully support the IBM s390x -architecture. We also updated cffi_ to 1.6 + +This release includes more improvement to warmup time and memory +requirements. We have seen about a 20% memory requirement reduction and up to +30% warmup time improvement, more detail in the `blog post`_. + +We also now have `fully support for the IBM s390x`_. Since this support is in +`RPython`_, any dynamic language written using RPython, like PyPy, will +automagically be supported on that architecture. + +We updated cffi_ to 1.6, and continue to improve support for the wider +python ecosystem using the PyPy interpreter. You can download the PyPy 5.1 release here: @@ -27,6 +34,8 @@ .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html .. _`numpy`: https://bitbucket.org/pypy/numpy .. _cffi: https://cffi.readthedocs.org +.. _`fully support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html +.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html What is PyPy? ============= @@ -47,7 +56,7 @@ * big- and little-endian variants of **PPC64** running Linux, - * **s960x** running Linux + * **s390x** running Linux .. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. _`dynamic languages`: http://pypyjs.org @@ -105,7 +114,7 @@ our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy -* Numpy: +* Numpy_: * Implemented numpy.where for a single argument @@ -141,7 +150,7 @@ * Update rpython functions with ones needed for py3k .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html -.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html +.. _Numpy: https://bitbucket.org/pypy/numpy Please update, and continue to help us make PyPy better. 
From pypy.commits at gmail.com Wed Apr 20 15:25:17 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 20 Apr 2016 12:25:17 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update website download page for 5.1 Message-ID: <5717d79d.aa5ec20a.45b87.ffff8f05@mx.google.com> Author: mattip Branch: extradoc Changeset: r732:c3535bec12d7 Date: 2016-04-20 22:24 +0300 http://bitbucket.org/pypy/pypy.org/changeset/c3535bec12d7/ Log: update website download page for 5.1 diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -74,7 +74,7 @@ performance improvements.

We provide binaries for x86, ARM, and PPC Linux, Mac OS/X and Windows for:

@@ -113,21 +113,21 @@ degrees of being up-to-date. -
-

Python2.7 compatible PyPy 5.0.1

+
+

Python2.7 compatible PyPy 5.1

@@ -196,7 +196,7 @@ uncompressed, they run in-place. For now you can uncompress them either somewhere in your home directory or, say, in /opt, and if you want, put a symlink from somewhere like -/usr/local/bin/pypy to /path/to/pypy-5.0.1/bin/pypy. Do +/usr/local/bin/pypy to /path/to/pypy-5.1.0/bin/pypy. Do not move or copy the executable pypy outside the tree – put a symlink to it, otherwise it will not find its libraries.

@@ -217,7 +217,7 @@

If you have pip:

 pypy -m pip install git+https://bitbucket.org/pypy/numpy.git
-pypy -m pip install git+https://bitbucket.org/pypy/numpy.git@pypy-5.0
+pypy -m pip install git+https://bitbucket.org/pypy/numpy.git@pypy-5.1
 

(the second version selects a particular tag, which may be needed if your pypy is not the latest development version.)

@@ -241,7 +241,7 @@
  • Get the source code. The following packages contain the source at the same revision as the above binaries:

    Or you can checkout the current trunk using Mercurial (the trunk usually works and is of course more up-to-date):

    @@ -379,17 +379,18 @@

    Checksums

    Here are the checksums for each of the downloads

    -

    pypy-5.0.1 md5:

    +

    pypy-5.1.0 md5:

    -5544c118d270138125fec1ec5659ef80  pypy-5.0.1-linux-armel.tar.bz2
    -34d6cf783cf585bbfff1b394d2db9a26  pypy-5.0.1-linux-armhf-raring.tar.bz2
    -224546fb5999c4b08b2b1c51e40dc055  pypy-5.0.1-linux-armhf-raspbian.tar.bz2
    -3f05067352f25e23eae618dca96062a2  pypy-5.0.1-linux.tar.bz2
    -6a6b19f6c13b42f4ce9d0c0d892f597a  pypy-5.0.1-linux64.tar.bz2
    -bec87524ebc3f11c9c9817f64311ef65  pypy-5.0.1-osx64.tar.bz2
    -798c6e83536a5fa5ed7d6efb4d06db1a  pypy-5.0.1-src.tar.bz2
    -928761075bcc2d01f9f884eeee105bd0  pypy-5.0.1-src.zip
    -2e53db6766a718084c9327a6059f8ad7  pypy-5.0.1-win32.zip
    +17baf9db5200559b9d6c45ec8f60ea48  pypy-5.1.0-linux-armel.tar.bz2
    +c0f360b601cd723031c0edc18b62f118  pypy-5.1.0-linux-armhf-raring.tar.bz2
    +27e5e98ccbca5ebb5933147556a46f77  pypy-5.1.0-linux-armhf-raspbian.tar.bz2
    +224d1f124393c96c98b9acbaf4f92078  pypy-5.1.0-linux.tar.bz2
    +2a58aa928ae1cabc6a3309cf98f6182e  pypy-5.1.0-linux64.tar.bz2
    +7f546940acb3ceebb5967697a9b05b65  pypy-5.1.0-osx64.tar.bz2
    +f9362ffc2946efcaadcc40fdb2c43df7  pypy-5.1.0-s390x.tar.bz2
    +d0a76859c83fb0427674273977086cb2  pypy-5.1.0-src.tar.bz2
    +204273a21dbf71c0827966265c40eb7a  pypy-5.1.0-src.zip
    +a1710ae6f15b567bf3c8fd608553ad48  pypy-5.1.0-win32.zip
     

    pypy3-2.4.0 md5:

    @@ -408,29 +409,31 @@
     2c9f0054f3b93a6473f10be35277825a  pypy-1.8-sandbox-linux64.tar.bz2
     009c970b5fa75754ae4c32a5d108a8d4  pypy-1.8-sandbox-linux.tar.bz2
     
    -

    pypy-5.0.1 sha1:

    +

    pypy-5.1.0 sha1:

    -d2df9030c670e178e2ee9b99934174184fe8aa1c  pypy-5.0.1-linux-armel.tar.bz2
    -89534b3b09336165bf706a459f170ae3628da891  pypy-5.0.1-linux-armhf-raring.tar.bz2
    -ecce668b3ec9d1a5d70e99ea4d0ce7491ca860e5  pypy-5.0.1-linux-armhf-raspbian.tar.bz2
    -b814bb1b70b39c1e601a15e8bb809f525d6ef04d  pypy-5.0.1-linux.tar.bz2
    -26f6bdada77adb2f79bce97513fdb58a91e6e967  pypy-5.0.1-linux64.tar.bz2
    -54eae1b3da6c29ba4bc5db35b89c23e6080a6d09  pypy-5.0.1-osx64.tar.bz2
    -e96dad1562c4a91b26612f0fad0e70d0635399ed  pypy-5.0.1-src.tar.bz2
    -f7e4cda496244eefc50323704c48c10b568937cf  pypy-5.0.1-src.zip
    -f0addc0cc809e3cc3ffe2c2dd643eb6e1c95cb49  pypy-5.0.1-win32.zip
    +114d4f981956b83cfbc0a3c819fdac0b0550cd82  pypy-5.1.0-linux-armel.tar.bz2
    +e3060f8fa765c317ec1ad6923f9ea595b9d411c3  pypy-5.1.0-linux-armhf-raring.tar.bz2
    +8943448afd1fd3e89be0575f69c6f3be69f2efbc  pypy-5.1.0-linux-armhf-raspbian.tar.bz2
    +229e7dbc130d2cc92be9d1cde88f2d6f7f28621b  pypy-5.1.0-linux.tar.bz2
    +c959524ce180f801bdbcbee4ca038309e1c771dd  pypy-5.1.0-linux64.tar.bz2
    +216a52e44f3642176cf05fc3b4c6e2cf8981e400  pypy-5.1.0-osx64.tar.bz2
    +b696059359a780ad3c2641b14c989021d93015e8  pypy-5.1.0-s390x.tar.bz2
    +c9c497836e6235af9fee2a98e4aeaa2bc3a29550  pypy-5.1.0-src.tar.bz2
    +a184ef5ada93d53e8dc4a9850a9ed764bd661d7b  pypy-5.1.0-src.zip
    +4daba0932afcc4755d93d55aa3cbdd851da9198d  pypy-5.1.0-win32.zip
     
    -

    pypy-5.0.1 sha256:

    +

    pypy-5.1.0 sha256:

    -17d55804b2253acd9de42276d756d4a08b7d1d2da09ef81dd325e14b18a1bcda  pypy-5.0.1-linux-armel.tar.bz2
    -1e9146978cc7e7bd30683a518f304a824db7b9b1c6fae5e866eb703684ba3c98  pypy-5.0.1-linux-armhf-raring.tar.bz2
    -338d1c32c1326e6321b222ae357711b38c4a0ffddf020c2a35536b5f69376e28  pypy-5.0.1-linux-armhf-raspbian.tar.bz2
    -4b9a294033f917a1674c9ddcb2e7e8d32c4f4351f8216fd1fe23f6d2ad2b1a36  pypy-5.0.1-linux.tar.bz2
    -1b1363a48edd1c1b31ca5e995987eda3d460a3404f36c3bb2dd9f52c93eecff5  pypy-5.0.1-linux64.tar.bz2
    -6ebdb9d91203f053b38e3c21841c11a72f416dc185f7b3b7c908229df15e924a  pypy-5.0.1-osx64.tar.bz2
    -1573c9284d3ec236c8e6ef3b954753932dff29462c54b5885b761d1ee68b6e05  pypy-5.0.1-src.tar.bz2
    -6e343f24c5e4ea87879bc4fd299b65a2825796286319edc0b69b3681017c145f  pypy-5.0.1-src.zip
    -c12254d8b1747322736d26e014744a426c6900d232c1799140fbb43f44319730  pypy-5.0.1-win32.zip
    +ea7017449ff0630431866423220c3688fc55c1a0b80a96af0ae138dd0751b81c  pypy-5.1.0-linux-armel.tar.bz2
    +a3e13083591bccc301fb974ff0a6c7e4ab4e611e4b31c0932898b981c794462b  pypy-5.1.0-linux-armhf-raring.tar.bz2
    +3bfcd251b4f3fd1a09520b2741c647c364d16d50c82b813732a78ac60ccb2b69  pypy-5.1.0-linux-armhf-raspbian.tar.bz2
    +2f6c521b5b3c1082eab58be78655aa01ec400d19baeec93c455864a7483b8744  pypy-5.1.0-linux.tar.bz2
    +0e8913351d043a50740b98cb89d99852b8bd6d11225a41c8abfc0baf7084cbf6  pypy-5.1.0-linux64.tar.bz2
    +7e270c66347158dd794c101c4817f742f760ed805aa0d10abe19ba4a78a75118  pypy-5.1.0-osx64.tar.bz2
    +096827f2cb041f9decc5a2b0b8fc6b5fe0748f229b0419fd73982e0714a292cd  pypy-5.1.0-s390x.tar.bz2
    +16bab9501e942c0704abbf9cd6c4e950c6a76dc226cf1e447ea084916aef4714  pypy-5.1.0-src.tar.bz2
    +afc1c72651c90418b57692a5628481dd09a3d3172765fd206e8bcdac7b1bf02d  pypy-5.1.0-src.zip
    +044e7f35223a443412b5948740e60e93069a6f8b0a72053cc9d472874bb1b6cc  pypy-5.1.0-win32.zip
     3373b1d51fc610b962e0b535087073f2cc921ab0269ba2896b140ab4a56588fd  pypy-5.0.1++-ppc64.tar.bz2
     53d742504a78366b833c04bd83740336aa4ddfecffeff6b2fa8728fcd6b4c8af  pypy-5.0.1+-ppc64le.tar.bz2
     
    diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -14,12 +14,12 @@ We provide binaries for x86, ARM, and PPC Linux, Mac OS/X and Windows for: -* the Python2.7 compatible release — **PyPy 5.0.1** — (`what's new in PyPy 5.0.1?`_) +* the Python2.7 compatible release — **PyPy 5.1** — (`what's new in PyPy 5.1?`_) * the Python3.2.5 compatible release — **PyPy3 2.4.0** — (`what's new in PyPy3 2.4.0?`_). * the Python2.7 Software Transactional Memory special release — **PyPy-STM 2.5.1** (Linux x86-64 only) -.. _what's new in PyPy 5.0.1?: http://doc.pypy.org/en/latest/release-5.0.1.html +.. _what's new in PyPy 5.1?: http://doc.pypy.org/en/latest/release-5.1.0.html .. _what's new in PyPy3 2.4.0?: http://doc.pypy.org/en/latest/release-pypy3-2.4.0.html @@ -73,7 +73,7 @@ .. _`portable Linux binaries`: https://github.com/squeaky-pl/portable-pypy#portable-pypy-distribution-for-linux -Python2.7 compatible PyPy 5.0.1 +Python2.7 compatible PyPy 5.1 ----------------------------------- * `Linux x86 binary (32bit, tar.bz2 built on Ubuntu 12.04 - 14.04)`__ (see ``[1]`` below) @@ -91,17 +91,17 @@ * `All our downloads,`__ including previous versions. We also have a mirror_, but please use only if you have troubles accessing the links above -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux-armhf-raspbian.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux-armhf-raring.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux-armel.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-osx64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-win32.zip -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1++-ppc64.tar.bz2 -.. 
__: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1+-ppc64le.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-src.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-src.zip +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-linux.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-linux64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-linux-armhf-raspbian.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-linux-armhf-raring.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-linux-armel.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-osx64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-win32.zip +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0++-ppc64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0+-ppc64le.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-src.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-src.zip .. _`vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582 .. __: https://bitbucket.org/pypy/pypy/downloads .. _mirror: http://buildbot.pypy.org/mirror/ @@ -201,7 +201,7 @@ uncompressed, they run in-place. For now you can uncompress them either somewhere in your home directory or, say, in ``/opt``, and if you want, put a symlink from somewhere like -``/usr/local/bin/pypy`` to ``/path/to/pypy-5.0.1/bin/pypy``. Do +``/usr/local/bin/pypy`` to ``/path/to/pypy-5.1.0/bin/pypy``. Do not move or copy the executable ``pypy`` outside the tree --- put a symlink to it, otherwise it will not find its libraries. 
@@ -231,7 +231,7 @@ If you have pip:: pypy -m pip install git+https://bitbucket.org/pypy/numpy.git - pypy -m pip install git+https://bitbucket.org/pypy/numpy.git at pypy-5.0 + pypy -m pip install git+https://bitbucket.org/pypy/numpy.git at pypy-5.1 (the second version selects a particular tag, which may be needed if your pypy is not the latest development version.) @@ -261,9 +261,9 @@ 1. Get the source code. The following packages contain the source at the same revision as the above binaries: - * `pypy-5.0.1-src.tar.bz2`__ (sources) + * `pypy-5.1.0-src.tar.bz2`__ (sources) - .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-src.tar.bz2 + .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.1.0-src.tar.bz2 Or you can checkout the current trunk using Mercurial_ (the trunk usually works and is of course more up-to-date):: From pypy.commits at gmail.com Wed Apr 20 16:03:22 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 20 Apr 2016 13:03:22 -0700 (PDT) Subject: [pypy-commit] buildbot default: deactivate nightly jit-benchmark-linux-x86-64-single-run runs Message-ID: <5717e08a.519d1c0a.6dfa.ffffe94c@mx.google.com> Author: mattip Branch: Changeset: r1001:6cada2d6955f Date: 2016-04-20 23:03 +0300 http://bitbucket.org/pypy/buildbot/changeset/6cada2d6955f/ Log: deactivate nightly jit-benchmark-linux-x86-64-single-run runs diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -291,7 +291,7 @@ LINUX64, # on speed-old, uses all cores JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) - JITBENCH64_NEW, # on speed64, uses 1 core (in part exclusively) + #JITBENCH64_NEW, # on speed64, uses 1 core (in part exclusively) ], branch=None, hour=1, minute=0), From pypy.commits at gmail.com Wed Apr 20 16:37:13 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 20 Apr 2016 13:37:13 -0700 (PDT) Subject: 
[pypy-commit] pypy cpyext-ext: add defines to post_include_bits when translating and name not in pypy_macros.h Message-ID: <5717e879.aa5ec20a.45b87.ffffa805@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83795:bed7808066ac Date: 2016-04-20 23:35 +0300 http://bitbucket.org/pypy/pypy/changeset/bed7808066ac/ Log: add defines to post_include_bits when translating and name not in pypy_macros.h diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1220,7 +1220,7 @@ # Generate definitions for global structures structs = ["#include "] if use_micronumpy: - structs.append('#include ') + structs.append('#include /* api.py line 1223 */) for name, (typ, expr) in GLOBALS.iteritems(): if '#' in name: structs.append('%s %s;' % (typ[:-1], name.split('#')[0])) @@ -1279,14 +1279,15 @@ export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() + prefix = 'PyPy' - generate_macros(export_symbols, prefix='PyPy') + generate_macros(export_symbols, prefix=prefix) functions = generate_decls_and_callbacks(db, [], api_struct=False, - prefix='PyPy') + prefix=prefix) code = "#include \n" if use_micronumpy: - code += "#include " + code += "#include /* api.py line 1290 */" code += "\n".join(functions) eci = build_eci(False, export_symbols, code, use_micronumpy) @@ -1302,9 +1303,13 @@ include_lines = ['RPY_EXTERN PyObject *pypy_static_pyobjs[];\n'] for name, (typ, expr) in sorted(GLOBALS.items()): if '#' in name: - name = name.split('#')[0] + name, header = name.split('#') assert typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*') - typ, name = typ[:-1], name[:-1] + typ = typ[:-1] + if header != pypy_decl: + # since the #define is not in pypy_macros, do it here + mname = mangle_name(prefix, name) + include_lines.append('#define %s %s\n' % (name, mname)) elif name.startswith('PyExc_'): typ = 'PyTypeObject' 
name = '_' + name @@ -1331,6 +1336,8 @@ for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): + if not func: + continue newname = mangle_name('PyPy', name) or name deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) From pypy.commits at gmail.com Wed Apr 20 17:02:56 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 20 Apr 2016 14:02:56 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: remove debug cruft Message-ID: <5717ee80.972e1c0a.99515.fffff728@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83796:8f7fc6ed39a0 Date: 2016-04-21 00:02 +0300 http://bitbucket.org/pypy/pypy/changeset/8f7fc6ed39a0/ Log: remove debug cruft diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -259,12 +259,12 @@ @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def PyDictProxy_Check(space, w_obj): w_typ = make_frozendict(space) - print 'check', w_typ, space.type(w_obj) + #print 'check', w_typ, space.type(w_obj) return space.isinstance_w(w_obj, w_typ) @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def PyDictProxy_CheckExact(space, w_obj): w_typ = make_frozendict(space) - print 'exact', w_typ, w_obj + #print 'exact', w_typ, w_obj return space.is_w(space.type(w_obj), w_typ) From pypy.commits at gmail.com Wed Apr 20 17:31:37 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 20 Apr 2016 14:31:37 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: Replace empty dict with NULL Message-ID: <5717f539.c11a1c0a.2744.1375@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r83797:9ede3a4c7b4d Date: 2016-04-20 23:31 +0200 http://bitbucket.org/pypy/pypy/changeset/9ede3a4c7b4d/ Log: Replace empty dict with NULL diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ 
b/pypy/module/cpyext/typeobject.py @@ -278,6 +278,8 @@ args_w = space.fixedview(w_args) w_subtype = args_w[0] w_args = space.newtuple(args_w[1:]) + if not space.is_true(w_kwds): + w_kwds = None try: subtype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_subtype)) From pypy.commits at gmail.com Wed Apr 20 17:42:55 2016 From: pypy.commits at gmail.com (mjacob) Date: Wed, 20 Apr 2016 14:42:55 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Get rid of tab in Python source file. Message-ID: <5717f7df.8a37c20a.a6f42.ffffbfa2@mx.google.com> Author: Manuel Jacob Branch: py3k Changeset: r83798:828214e58f10 Date: 2016-04-20 23:36 +0200 http://bitbucket.org/pypy/pypy/changeset/828214e58f10/ Log: Get rid of tab in Python source file. diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1193,9 +1193,8 @@ space.newtuple(gears) ] if self.lst is not None: - result_w = result_w + [ - space.newtuple([ - space.wrap(index) for index in self.indices])] + indices_w = [space.wrap(index) for index in self.indices] + result_w = result_w + [space.newtuple(indices_w)] else: result_w = [ space.type(self), From pypy.commits at gmail.com Wed Apr 20 17:42:57 2016 From: pypy.commits at gmail.com (mjacob) Date: Wed, 20 Apr 2016 14:42:57 -0700 (PDT) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <5717f7e1.0113c20a.464a8.ffffb36b@mx.google.com> Author: Manuel Jacob Branch: py3k Changeset: r83799:9f5913d6c6c2 Date: 2016-04-20 23:37 +0200 http://bitbucket.org/pypy/pypy/changeset/9f5913d6c6c2/ Log: 2to3 diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -1176,22 +1176,26 @@ """ def test_crash_mro_without_object_1(self): + """ class X(type): def mro(self): return [self] - class C: - __metaclass__ = X + class 
C(metaclass=X): + pass e = raises(TypeError, C) # the lookup of '__new__' fails assert str(e.value) == "cannot create 'C' instances" + """ def test_crash_mro_without_object_2(self): + """ class X(type): def mro(self): return [self, int] - class C(int): - __metaclass__ = X + class C(int, metaclass=X): + pass C() # the lookup of '__new__' succeeds in 'int', # but the lookup of '__init__' fails + """ class AppTestWithMethodCacheCounter: From pypy.commits at gmail.com Wed Apr 20 17:46:24 2016 From: pypy.commits at gmail.com (mjacob) Date: Wed, 20 Apr 2016 14:46:24 -0700 (PDT) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <5717f8b0.876cc20a.1fc8c.ffffb992@mx.google.com> Author: Manuel Jacob Branch: py3k Changeset: r83800:40c11f7ad71a Date: 2016-04-20 23:44 +0200 http://bitbucket.org/pypy/pypy/changeset/40c11f7ad71a/ Log: 2to3 diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -56,7 +56,7 @@ from __pypy__ import _promote assert _promote(1) == 1 assert _promote(1.1) == 1.1 - assert _promote("abc") == "abc" + assert _promote(b"abc") == b"abc" raises(TypeError, _promote, u"abc") l = [] assert _promote(l) is l From pypy.commits at gmail.com Wed Apr 20 21:53:28 2016 From: pypy.commits at gmail.com (pjenvey) Date: Wed, 20 Apr 2016 18:53:28 -0700 (PDT) Subject: [pypy-commit] pypy py3k: translation fixes for osx Message-ID: <57183298.4849c20a.8b85f.0d39@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83801:9ba0ca55f93a Date: 2016-04-20 18:52 -0700 http://bitbucket.org/pypy/pypy/changeset/9ba0ca55f93a/ Log: translation fixes for osx diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1023,11 +1023,11 @@ dir_fd may not be implemented on your platform. 
If it is unavailable, using it will raise a NotImplementedError.""" try: - if dir_fd == DEFAULT_DIR_FD: - dispatch_filename(rposix.mkfifo)(space, w_path, mode) - else: + if rposix.HAVE_MKFIFOAT and dir_fd != DEFAULT_DIR_FD: path = space.fsencode_w(w_path) rposix.mkfifoat(path, mode, dir_fd) + else: + dispatch_filename(rposix.mkfifo)(space, w_path, mode) except OSError as e: raise wrap_oserror2(space, e, w_path) @@ -1047,11 +1047,11 @@ dir_fd may not be implemented on your platform. If it is unavailable, using it will raise a NotImplementedError.""" try: - if dir_fd == DEFAULT_DIR_FD: - dispatch_filename(rposix.mknod)(space, w_filename, mode, device) - else: + if rposix.HAVE_MKNODAT and dir_fd != DEFAULT_DIR_FD: fname = space.fsencode_w(w_filename) rposix.mknodat(fname, mode, device, dir_fd) + else: + dispatch_filename(rposix.mknod)(space, w_filename, mode, device) except OSError as e: raise wrap_oserror2(space, e, w_filename) @@ -1405,7 +1405,7 @@ atime_s, atime_ns = convert_ns(space, args_w[0]) mtime_s, mtime_ns = convert_ns(space, args_w[1]) - if path.as_fd != -1: + if rposix.HAVE_FUTIMENS and path.as_fd != -1: if dir_fd != DEFAULT_DIR_FD: raise oefmt(space.w_ValueError, "utime: can't specify both dir_fd and fd") From pypy.commits at gmail.com Thu Apr 21 03:52:03 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 21 Apr 2016 00:52:03 -0700 (PDT) Subject: [pypy-commit] buildbot default: update the buildbot location for bencher4 Message-ID: <571886a3.a82cc20a.44b20.69f4@mx.google.com> Author: Armin Rigo Branch: Changeset: r1002:33fab5c7253b Date: 2016-04-21 09:52 +0200 http://bitbucket.org/pypy/buildbot/changeset/33fab5c7253b/ Log: update the buildbot location for bencher4 diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -26,8 +26,9 @@ SpeedPythonCPU = locks.MasterLock('speed_python_cpu', maxCount=24) WinSlaveLock = locks.SlaveLock('win_cpu', maxCount=1) # speed-old 
has 24 cores, but memory for ~2 translations -SpeedOldLock = locks.MasterLock('speed_old_lock', maxCount=2) - +#SpeedOldLock = locks.MasterLock('speed_old_lock', maxCount=2) +# bencher4 has 8 cores, 32 GB RAM +Bencher4Lock = locks.MasterLock('bencher4_lock', maxCount=4) # The cross translation machine can accomodate 2 jobs at the same time ARMCrossLock = locks.MasterLock('arm_cpu', maxCount=2) diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -50,7 +50,8 @@ TannitCPU = pypybuilds.TannitCPU WinSlaveLock = pypybuilds.WinSlaveLock -SpeedOldLock = pypybuilds.SpeedOldLock +#SpeedOldLock = pypybuilds.SpeedOldLock +Bencher4Lock = pypybuilds.Bencher4Lock pypyOwnTestFactory = pypybuilds.Own() pypyOwnTestFactoryWin = pypybuilds.Own(platform="win32") @@ -266,15 +267,15 @@ 'schedulers': [ # the benchmarks run on tannit and (planned) speed-old.python.org. - # All the other linux tests run on speed-old.python.org. + # All the other linux tests run on bencher4.soft-dev.org. 
Nightly("nightly-0-00", [ # benchmarks # linux tests LINUX32, # on tannit32, uses all cores JITLINUX32, # on tannit32, uses 1 core - JITLINUX64, # on speed-old, uses 1 core + JITLINUX64, # on bencher4, uses 1 core #APPLVLLINUX32, # on tannit32, uses 1 core - #APPLVLLINUX64, # on speed-old, uses 1 core + #APPLVLLINUX64, # on bencher4, uses 1 core # other platforms #MACOSX32, # on minime JITWIN32, # on allegro_win32, SalsaSalsa @@ -288,7 +289,7 @@ ], branch='default', hour=0, minute=0), Nightly("nightly-1-00", [ - LINUX64, # on speed-old, uses all cores + LINUX64, # on bencher4, uses all cores JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) #JITBENCH64_NEW, # on speed64, uses 1 core (in part exclusively) @@ -304,8 +305,8 @@ ]), Nightly("nightly-3-00-py3k", [ - LINUX64, # on speed-old, uses all cores - JITLINUX64, # on speed-old, uses 1 core + LINUX64, # on bencher4, uses all cores + JITLINUX64, # on bencher4, uses 1 core ], branch="py3k", hour=3, minute=0), # S390X vm (ibm-research) @@ -377,11 +378,11 @@ "locks": [TannitCPU.access('counting')], }, {"name": LINUX64, - "slavenames": ["speed-old"], + "slavenames": ["bencher4", "speed-old"], "builddir": LINUX64, "factory": pypyOwnTestFactory, "category": 'linux64', - "locks": [SpeedOldLock.access('counting')], + "locks": [Bencher4Lock.access('counting')], }, {"name": APPLVLLINUX32, #"slavenames": ["allegro32"], @@ -392,11 +393,11 @@ "locks": [TannitCPU.access('counting')], }, {"name": APPLVLLINUX64, - "slavenames": ["speed-old"], + "slavenames": ["bencher4", "speed-old"], "builddir": APPLVLLINUX64, "factory": pypyTranslatedAppLevelTestFactory64, "category": "linux64", - "locks": [SpeedOldLock.access('counting')], + "locks": [Bencher4Lock.access('counting')], }, {"name": LIBPYTHON_LINUX32, "slavenames": ["tannit32"], @@ -407,11 +408,11 @@ "locks": [TannitCPU.access('counting')], }, {"name": LIBPYTHON_LINUX64, - "slavenames": ["speed-old"], + 
"slavenames": ["bencher4", "speed-old"], "builddir": LIBPYTHON_LINUX64, "factory": pypyTranslatedLibPythonTestFactory, "category": "linux64", - "locks": [SpeedOldLock.access('counting')], + "locks": [Bencher4Lock.access('counting')], }, {"name" : JITLINUX32, #"slavenames": ["allegro32"], @@ -422,11 +423,11 @@ "locks": [TannitCPU.access('counting')], }, {'name': JITLINUX64, - 'slavenames': ["speed-old"], + 'slavenames': ["bencher4", "speed-old"], 'builddir': JITLINUX64, 'factory': pypyJITTranslatedTestFactory64, 'category': 'linux64', - "locks": [SpeedOldLock.access('counting')], + "locks": [Bencher4Lock.access('counting')], }, {"name": JITBENCH, "slavenames": ["tannit32"], @@ -443,11 +444,11 @@ # the locks are acquired with fine grain inside the build }, {"name": JITBENCH64_NEW, - "slavenames": ['speed-old'], + "slavenames": [], # was: 'speed-old' "builddir": JITBENCH64_NEW, "factory": pypyJITBenchmarkFactory64_speed, "category": "benchmark-run", - "locks": [SpeedOldLock.access('exclusive')], + "locks": [Bencher4Lock.access('exclusive')], }, {"name": MACOSX32, "slavenames": ["minime"], From pypy.commits at gmail.com Thu Apr 21 04:13:44 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 21 Apr 2016 01:13:44 -0700 (PDT) Subject: [pypy-commit] pypy default: list libgc-dev (it used to be listed somewhere, but was forgotten) Message-ID: <57188bb8.26b0c20a.18b87.770c@mx.google.com> Author: Armin Rigo Branch: Changeset: r83802:659dd035cdd4 Date: 2016-04-21 10:13 +0200 http://bitbucket.org/pypy/pypy/changeset/659dd035cdd4/ Log: list libgc-dev (it used to be listed somewhere, but was forgotten) diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -102,7 +102,7 @@ apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ - tk-dev + tk-dev libgc-dev For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. 
From pypy.commits at gmail.com Thu Apr 21 04:18:49 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 21 Apr 2016 01:18:49 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: working on the API for get_location (nearly done) Message-ID: <57188ce9.e21bc20a.4fd9b.73ac@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83803:794f3f697344 Date: 2016-04-21 10:18 +0200 http://bitbucket.org/pypy/pypy/changeset/794f3f697344/ Log: working on the API for get_location (nearly done) diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -49,12 +49,8 @@ name = bytecode.co_name if not name: name = "" - # we can probably do better at co_firstlineno? - return (bytecode.co_filename, - bytecode.co_firstlineno, - name, - intmask(next_instr), - opname) + return "shshs", [bytecode.co_filename, bytecode.co_firstlineno, + name, intmask(next_instr), opname] def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -3,7 +3,7 @@ from rpython.rlib.jit import JitDriver, unroll_parameters, set_param from rpython.rlib.jit import PARAMETERS, dont_look_inside from rpython.rlib.jit import promote, _get_virtualizable_token -from rpython.rlib import jit_hooks, rposix +from rpython.rlib import jit_hooks, rposix, jitlog from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rthread import ThreadLocalReference, ThreadLocalField from rpython.jit.backend.detect_cpu import getcpuclass @@ -48,8 +48,11 @@ lltype.Float, macro=True, releasegil=True, compilation_info=eci) + @jitlog.returns(jitlog.MP_FILENAME, + jitlog.MP_LINENO, + 
jitlog.MP_INDEX) def get_location(): - return "file", 0, "func", 0, "opcode" + return ("/home.py",0,0) jitdriver = JitDriver(greens = [], reds = ['total', 'frame', 'j'], diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -5,6 +5,7 @@ from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.objectmodel import compute_unique_id, always_inline +from rpython.rlib import jitlog as jl import sys import weakref import struct @@ -32,6 +33,7 @@ MARK_JITLOG_COUNTER = 0x20 MARK_START_TRACE = 0x21 +MARK_INIT_MERGE_POINT = 0x22 MARK_JITLOG_HEADER = 0x23 MARK_JITLOG_DEBUG_MERGE_POINT = 0x24 @@ -71,6 +73,18 @@ else: return encode_le_64bit(val) +def encode_type(type, value): + if type == "s": + return encode_str(value) + elif type == "q": + return encode_le_64bit(value) + elif type == "i": + return encode_le_32bit(value) + elif type == "h": + return encode_le_32bit(value) + else: + raise NotImplementedError + def assemble_header(): version = JITLOG_VERSION_16BIT_LE count = len(resoperations.opname) @@ -110,7 +124,7 @@ else: content.append(encode_str('loop')) content.append(encode_le_addr(int(entry_bridge))) - self.cintf._write_marked(MARK_START_TRACE, ''.join(content)) + self._write_marked(MARK_START_TRACE, ''.join(content)) self.trace_id += 1 def _write_marked(self, mark, line): @@ -161,6 +175,7 @@ self.tag = tag self.mc = mc self.logger = logger + self.merge_point_file = None def write_trace(self, trace): ops = [] @@ -169,9 +184,9 @@ ops.append(i.next()) self.write(i.inputargs, ops) - def write(self, args, ops, faildescr=None, ops_offset={}): + def write(self, args, ops, ops_offset={}): log = self.logger - log._write_marked(self.tag, encode_le_addr(self.trace_id)) + log._write_marked(self.tag, encode_le_addr(self.logger.trace_id)) # input args str_args = [self.var_to_str(arg) for arg in args] 
@@ -196,16 +211,40 @@ self.memo = {} + def encode_once(self): + pass + def encode_debug_info(self, op): log = self.logger jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] - filename, lineno, enclosed, index, opname = jd_sd.warmstate.get_location(op.getarglist()[3:]) + if not jd_sd.warmstate.get_location: + return + types = jd_sd.warmstate.get_location_types + values = jd_sd.warmstate.get_location(op.getarglist()[3:]) + if values is None: + # indicates that this function is not provided to the jit driver + return + + if self.merge_point_file is None: + # first time visiting a merge point + positions = jd_sd.warmstate.get_location_positions + encoded_types = [] + for i, (semantic_type, _) in enumerate(positions): + encoded_types.append(chr(semantic_type)) + if semantic_type == jl.MP_FILENAME: + self.common_prefix = values[i] + log._write_marked(MARK_INIT_MERGE_POINT, ''.join(encoded_types)) + + + # the types have already been written line = [] - line.append(encode_str(filename or "")) - line.append(encode_le_16bit(lineno)) - line.append(encode_str(enclosed or "")) - line.append(encode_le_64bit(index)) - line.append(encode_str(opname or "")) + for i,(sem_type,gen_type) in enumerate(types): + value = values[i] + if sem_type == jl.PM_FILENAME: + self.common_prefix = os.path.commonpath([self.common_prefix, value]) + log._write_marked(MARK_COMMON_PREFIX, chr(jl.PM_FILENAME) + \ + encode_str(self.common_prefix)) + line.append(encode_type(gen_type, value)) log._write_marked(MARK_JITLOG_DEBUG_MERGE_POINT, ''.join(line)) diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -6,6 +6,7 @@ cast_base_ptr_to_instance, hlstr, cast_instance_to_gcref) from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator import model as annmodel +from rpython.annotator.dictdef import DictDef from rpython.rtyper.llinterp import LLException 
from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache from rpython.flowspace.model import SpaceOperation, Variable, Constant @@ -563,14 +564,12 @@ jd._maybe_compile_and_run_fn = maybe_compile_and_run def make_driverhook_graphs(self): - s_Str = annmodel.SomeString() - s_Str_None = annmodel.SomeString(can_be_None=True) - s_Int = annmodel.SomeInteger() # annhelper = MixLevelHelperAnnotator(self.translator.rtyper) for jd in self.jitdrivers_sd: jd._printable_loc_ptr = self._make_hook_graph(jd, - annhelper, jd.jitdriver.get_printable_location, s_Str) + annhelper, jd.jitdriver.get_printable_location, + annmodel.SomeString()) jd._get_unique_id_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.get_unique_id, annmodel.SomeInteger()) jd._confirm_enter_jit_ptr = self._make_hook_graph(jd, @@ -581,9 +580,36 @@ jd._should_unroll_one_iteration_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.should_unroll_one_iteration, annmodel.s_Bool) - s_Tuple = annmodel.SomeTuple([s_Str_None, s_Int, s_Str_None, s_Int, s_Str_None]) + # + s_Str = annmodel.SomeString(no_nul=True) + s_Int = annmodel.SomeInteger() + items = [] + types = () + pos = () + if jd.jitdriver.get_location: + assert hasattr(jd.jitdriver.get_location, '_loc_types'), """ + You must decorate your get_location function: + + from rpython.rlib import jitlog as jl + @jl.returns(jl.MP_FILENAME, jl.MP_XXX, ...) + def get_loc(your, green, keys): + name = "x.txt" # extract it from your green keys + return (name, ...) 
+ """ + types = jd.jitdriver.get_location._loc_types + del jd.jitdriver.get_location._loc_types + # + for _,type in types: + if type == 's': + items.append(s_Str) + elif type == 'i': + items.append(s_Int) + else: + raise NotImplementedError + s_Tuple = annmodel.SomeTuple(items) jd._get_location_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.get_location, s_Tuple) + jd._get_loc_types = types annhelper.finish() def _make_hook_graph(self, jitdriver_sd, annhelper, func, diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -6,6 +6,7 @@ from rpython.rlib.debug import debug_start, debug_stop, debug_print from rpython.rlib.debug import have_debug_prints_for from rpython.rlib.jit import PARAMETERS +from rpython.rlib import jitlog from rpython.rlib.nonconst import NonConstant from rpython.rlib.objectmodel import specialize, we_are_translated, r_dict from rpython.rlib.rarithmetic import intmask, r_uint @@ -678,26 +679,33 @@ drivername = jitdriver.name else: drivername = '' - # get_location new API + # get_location returns get_location_ptr = self.jitdriver_sd._get_location_ptr - if get_location_ptr is None: - missing_get_loc = '(%s: no get_location)' % drivername - def get_location(greenkey): - return (missing_get_loc, 0, '', 0, '') - else: + if get_location_ptr is not None: + types = self.jitdriver_sd._get_loc_types unwrap_greenkey = self.make_unwrap_greenkey() + unrolled_types = unrolling_iterable(enumerate(types)) def get_location(greenkey): greenargs = unwrap_greenkey(greenkey) fn = support.maybe_on_top_of_llinterp(rtyper, get_location_ptr) tuple_ptr = fn(*greenargs) - # it seems there is no "hltuple" function - return (hlstr(tuple_ptr.item0), - intmask(tuple_ptr.item1), - hlstr(tuple_ptr.item2), - intmask(tuple_ptr.item3), - hlstr(tuple_ptr.item4) - ) - self.get_location = get_location + # + flag = intmask(tuple_ptr.item0) + value_tuple = 
tuple_ptr.item1 + ntuple = () + for i,(_,t) in unrolled_types: + if t == "s": + ntuple += (hlstr(getattr(value_tuple, 'item' + str(i))),) + elif t == "i": + ntuple += (intmask(getattr(value_tuple, 'item' + str(i))),) + else: + raise NotImplementedError + return flag, ntuple + self.get_location_types = types + self.get_location = get_location + else: + self.get_location_types = None + self.get_location = None # printable_loc_ptr = self.jitdriver_sd._printable_loc_ptr if printable_loc_ptr is None: diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/jitlog.py @@ -0,0 +1,28 @@ + +# generic parameters +MP_STR = (0x0, "s") +MP_INT = (0x0, "i") + +# concrete parameters +MP_FILENAME = (0x1, "s") +MP_LINENO = (0x2, "i") +MP_INDEX = (0x4, "i") +MP_ENCLOSING = (0x8, "s") +MP_OPCODE = (0x10, "s") + +def get_type(flag): + pass + +def returns(*args): + """ Decorate your get_location function to specify the types. + Use MP_* constant as parameters. An example impl for get_location + would return the following: + + @returns(MP_FILENAME, MP_LINENO) + def get_location(...): + return ("a.py", 0) + """ + def decor(method): + method._loc_types = args + return method + return decor From pypy.commits at gmail.com Thu Apr 21 05:45:23 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 21 Apr 2016 02:45:23 -0700 (PDT) Subject: [pypy-commit] cffi default: Move the FFI Interface and Conversion reference sections to their own page. Message-ID: <5718a133.a9a1c20a.da1a.ffff9b1a@mx.google.com> Author: Armin Rigo Branch: Changeset: r2671:a2bdd438efba Date: 2016-04-21 11:02 +0200 http://bitbucket.org/cffi/cffi/changeset/a2bdd438efba/ Log: Move the FFI Interface and Conversion reference sections to their own page. diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -216,7 +216,7 @@ .. __: using.html#working .. __: using.html#def-extern -.. 
__: using.html#ffi-new_handle +.. __: ref.html#ffi-new-handle .. __: cdef.html#cdef .. _`Using the ffi/lib objects`: using.html diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -17,6 +17,7 @@ installation overview using + ref cdef embedding diff --git a/doc/source/ref.rst b/doc/source/ref.rst new file mode 100644 --- /dev/null +++ b/doc/source/ref.rst @@ -0,0 +1,590 @@ +================================ +CFFI Reference +================================ + +.. contents:: + + +FFI Interface +------------- + + +ffi.NULL +++++++++ + +**ffi.NULL**: a constant NULL of type ````. + + +ffi.error ++++++++++ + +**ffi.error**: the Python exception raised in various cases. (Don't +confuse it with ``ffi.errno``.) + + +ffi.new() ++++++++++ + +**ffi.new(cdecl, init=None)**: +allocate an instance according to the specified C type and return a +pointer to it. The specified C type must be either a pointer or an +array: ``new('X *')`` allocates an X and returns a pointer to it, +whereas ``new('X[n]')`` allocates an array of n X'es and returns an +array referencing it (which works mostly like a pointer, like in C). +You can also use ``new('X[]', n)`` to allocate an array of a +non-constant length n. See the `detailed documentation`__ for other +valid initializers. + +.. __: using.html#working + +When the returned ```` object goes out of scope, the memory is +freed. In other words the returned ```` object has ownership of +the value of type ``cdecl`` that it points to. This means that the raw +data can be used as long as this object is kept alive, but must not be +used for a longer time. Be careful about that when copying the +pointer to the memory somewhere else, e.g. into another structure. + + +ffi.cast() +++++++++++ + +**ffi.cast("C type", value)**: similar to a C cast: returns an +instance of the named C type initialized with the given value. The +value is casted between integers or pointers of any type. 
+ + +ffi.errno, ffi.getwinerror() +++++++++++++++++++++++++++++ + +**ffi.errno**: the value of ``errno`` received from the most recent C call +in this thread, and passed to the following C call. (This is a read-write +property.) + +**ffi.getwinerror(code=-1)**: on Windows, in addition to ``errno`` we +also save and restore the ``GetLastError()`` value across function +calls. This function returns this error code as a tuple ``(code, +message)``, adding a readable message like Python does when raising +WindowsError. If the argument ``code`` is given, format that code into +a message instead of using ``GetLastError()``. +(Note that it is also possible to declare and call the ``GetLastError()`` +function as usual.) + + +ffi.string(), ffi.unpack() +++++++++++++++++++++++++++ + +**ffi.string(cdata, [maxlen])**: return a Python string (or unicode +string) from the 'cdata'. + +- If 'cdata' is a pointer or array of characters or bytes, returns the + null-terminated string. The returned string extends until the first + null character, or at most 'maxlen' characters. If 'cdata' is an + array then 'maxlen' defaults to its length. See ``ffi.buffer()`` below + for a way to continue past the first null character. *Python 3:* this + returns a ``bytes``, not a ``str``. + +- If 'cdata' is a pointer or array of wchar_t, returns a unicode string + following the same rules. + +- If 'cdata' is a single character or byte or a wchar_t, returns it as a + byte string or unicode string. (Note that in some situation a single + wchar_t may require a Python unicode string of length 2.) + +- If 'cdata' is an enum, returns the value of the enumerator as a string. + If the value is out of range, it is simply returned as the stringified + integer. + +**ffi.unpack(...)**: XXXXXXXXXXX + + +ffi.buffer(), ffi.from_buffer() ++++++++++++++++++++++++++++++++ + +**ffi.buffer(cdata, [size])**: return a buffer object that references +the raw C data pointed to by the given 'cdata', of 'size' bytes. 
The +'cdata' must be a pointer or an array. If unspecified, the size of the +buffer is either the size of what ``cdata`` points to, or the whole size +of the array. Getting a buffer is useful because you can read from it +without an extra copy, or write into it to change the original value. + +Here are a few examples of where buffer() would be useful: + +- use ``file.write()`` and ``file.readinto()`` with + such a buffer (for files opened in binary mode) + +- use ``ffi.buffer(mystruct[0])[:] = socket.recv(len(buffer))`` to read + into a struct over a socket, rewriting the contents of mystruct[0] + +Remember that like in C, you can use ``array + index`` to get the pointer +to the index'th item of an array. + +The returned object is not a built-in buffer nor memoryview object, +because these objects' API changes too much across Python versions. +Instead it has the following Python API (a subset of Python 2's +``buffer``): + +- ``buf[:]`` or ``bytes(buf)``: fetch a copy as a regular byte string (or + ``buf[start:end]`` for a part) + +- ``buf[:] = newstr``: change the original content (or ``buf[start:end] + = newstr``) + +- ``len(buf), buf[index], buf[index] = newchar``: access as a sequence + of characters. + +The buffer object returned by ``ffi.buffer(cdata)`` keeps alive the +``cdata`` object: if it was originally an owning cdata, then its +owned memory will not be freed as long as the buffer is alive. + +Python 2/3 compatibility note: you should avoid using ``str(buf)``, +because it gives inconsistent results between Python 2 and Python 3. +(This is similar to how ``str()`` gives inconsistent results on regular +byte strings). Use ``buf[:]`` instead. + +**ffi.from_buffer(python_buffer)**: return a ```` that +points to the data of the given Python object, which must support the +buffer interface. This is the opposite of ``ffi.buffer()``. 
It gives +a reference to the existing data, not a copy; for this +reason, and for PyPy compatibility, it does not work with the built-in +types str or unicode or bytearray (or buffers/memoryviews on them). +It is meant to be used on objects +containing large quantities of raw data, like ``array.array`` or numpy +arrays. It supports both the old buffer API (in Python 2.x) and the +new memoryview API. Note that if you pass a read-only buffer object, +you still get a regular ````; it is your responsibility +not to write there if the original buffer doesn't expect you to. +The original object is kept alive (and, in case +of memoryview, locked) as long as the cdata object returned by +``ffi.from_buffer()`` is alive. *New in version 0.9.* + + +ffi.memmove() ++++++++++++++ + +**ffi.memmove(dest, src, n)**: copy ``n`` bytes from memory area +``src`` to memory area ``dest``. See examples below. Inspired by the +C functions ``memcpy()`` and ``memmove()``---like the latter, the +areas can overlap. Each of ``dest`` and ``src`` can be either a cdata +pointer or a Python object supporting the buffer/memoryview interface. +In the case of ``dest``, the buffer/memoryview must be writable. +Unlike ``ffi.from_buffer()``, there are no restrictions on the type of +buffer. *New in version 1.3.* Examples: + +* ``ffi.memmove(myptr, b"hello", 5)`` copies the 5 bytes of + ``b"hello"`` to the area that ``myptr`` points to. + +* ``ba = bytearray(100); ffi.memmove(ba, myptr, 100)`` copies 100 + bytes from ``myptr`` into the bytearray ``ba``. + +* ``ffi.memmove(myptr + 1, myptr, 100)`` shifts 100 bytes from + the memory at ``myptr`` to the memory at ``myptr + 1``. + + +ffi.typeof(), ffi.sizeof(), ffi.alignof() ++++++++++++++++++++++++++++++++++++++++++ + +**ffi.typeof("C type" or cdata object)**: return an object of type +```` corresponding to the parsed string, or to the C type of the +cdata instance. 
Usually you don't need to call this function or to +explicitly manipulate ```` objects in your code: any place that +accepts a C type can receive either a string or a pre-parsed ``ctype`` +object (and because of caching of the string, there is no real +performance difference). It can still be useful in writing typechecks, +e.g.: + +.. code-block:: python + + def myfunction(ptr): + assert ffi.typeof(ptr) is ffi.typeof("foo_t*") + ... + +Note also that the mapping from strings like ``"foo_t*"`` to the +```` objects is stored in some internal dictionary. This +guarantees that there is only one ```` object, so you +can use the ``is`` operator to compare it. The downside is that the +dictionary entries are immortal for now. In the future, we may add +transparent reclamation of old, unused entries. In the meantime, note +that using strings like ``"int[%d]" % length`` to name a type will +create many immortal cached entries if called with many different +lengths. + +**ffi.sizeof("C type" or cdata object)**: return the size of the +argument in bytes. The argument can be either a C type, or a cdata object, +like in the equivalent ``sizeof`` operator in C. + +**ffi.alignof("C type")**: return the natural alignment size in bytes of +the argument. Corresponds to the ``__alignof__`` operator in GCC. + + +.. _ffi-addressof: + +ffi.offsetof(), ffi.addressof() ++++++++++++++++++++++++++++++++ + +**ffi.offsetof("C struct or array type", \*fields_or_indexes)**: return the +offset within the struct of the given field. Corresponds to ``offsetof()`` +in C. + +*New in version 0.9:* +You can give several field names in case of nested structures. You +can also give numeric values which correspond to array items, in case +of a pointer or array type. For example, ``ffi.offsetof("int[5]", 2)`` +is equal to the size of two integers, as is ``ffi.offsetof("int *", 2)``. + + +**ffi.addressof(cdata, \*fields_or_indexes)**: limited equivalent to +the '&' operator in C: + +1. 
``ffi.addressof()`` returns a cdata that +is a pointer to this struct or union. The returned pointer is only +valid as long as the original ``cdata`` object is; be sure to keep it +alive if it was obtained directly from ``ffi.new()``. + +2. ``ffi.addressof(, field-or-index...)`` returns the address +of a field or array item inside the given structure or array. In case +of nested structures or arrays, you can give more than one field or +index to look recursively. Note that ``ffi.addressof(array, index)`` +can also be expressed as ``array + index``: this is true both in CFFI +and in C, where ``&array[index]`` is just ``array + index``. + +3. ``ffi.addressof(, "name")`` returns the address of the +named function or global variable from the given library object. +*New in version 1.1:* for functions, it returns a regular cdata +object containing a pointer to the function. + +Note that the case 1. cannot be used to take the address of a +primitive or pointer, but only a struct or union. It would be +difficult to implement because only structs and unions are internally +stored as an indirect pointer to the data. If you need a C int whose +address can be taken, use ``ffi.new("int[1]")`` in the first place; +similarly, for a pointer, use ``ffi.new("foo_t *[1]")``. + + +ffi.CData, ffi.CType +++++++++++++++++++++ + +**ffi.CData, ffi.CType**: the Python type of the objects referred to +as ```` and ```` in the rest of this document. Note +that some cdata objects may be actually of a subclass of +``ffi.CData``, and similarly with ctype, so you should check with +``if isinstance(x, ffi.CData)``. Also, ```` objects have +a number of attributes for introspection: ``kind`` and ``cname`` are +always present, and depending on the kind they may also have +``item``, ``length``, ``fields``, ``args``, ``result``, ``ellipsis``, +``abi``, ``elements`` and ``relements``. + + +ffi.gc() +++++++++ + +**ffi.gc(cdata, destructor)**: return a new cdata object that points to the +same data. 
Later, when this new cdata object is garbage-collected, +``destructor(old_cdata_object)`` will be called. Example of usage: +``ptr = ffi.gc(lib.malloc(42), lib.free)``. Note that like objects +returned by ``ffi.new()``, the returned pointer objects have *ownership*, +which means the destructor is called as soon as *this* exact returned +object is garbage-collected. + +Note that this should be avoided for large memory allocations or +for limited resources. This is particularly true on PyPy: its GC does +not know how much memory or how many resources the returned ``ptr`` +holds. It will only run its GC when enough memory it knows about has +been allocated (and thus run the destructor possibly later than you +would expect). Moreover, the destructor is called in whatever thread +PyPy is at that moment, which might be a problem for some C libraries. +In these cases, consider writing a wrapper class with custom ``__enter__()`` +and ``__exit__()`` methods, allocating and freeing the C data at known +points in time, and using it in a ``with`` statement. + + +ffi.new_handle(), ffi.from_handle() ++++++++++++++++++++++++++++++++++++ + +**ffi.new_handle(python_object)**: return a non-NULL cdata of type +``void *`` that contains an opaque reference to ``python_object``. You +can pass it around to C functions or store it into C structures. Later, +you can use **ffi.from_handle(p)** to retrieve the original +``python_object`` from a value with the same ``void *`` pointer. +*Calling ffi.from_handle(p) is invalid and will likely crash if +the cdata object returned by new_handle() is not kept alive!* + +(In case you are wondering, this ``void *`` is not the ``PyObject *`` +pointer. This wouldn't make sense on PyPy anyway.) + +The ``ffi.new_handle()/from_handle()`` functions *conceptually* work +like this: + +* ``new_handle()`` returns cdata objects that contains references to + the Python objects; we call them collectively the "handle" cdata + objects. 
The ``void *`` values in these handle cdata objects are + random but unique. + +* ``from_handle(p)`` searches all live "handle" cdata objects for the + one that has the same value ``p`` as its ``void *`` value. It then + returns the Python object referenced by that handle cdata object. + If none is found, you get "undefined behavior" (i.e. crashes). + +The "handle" cdata object keeps the Python object alive, similar to +how ``ffi.new()`` returns a cdata object that keeps a piece of memory +alive. If the handle cdata object *itself* is not alive any more, +then the association ``void * -> python_object`` is dead and +``from_handle()`` will crash. + +*New in version 1.4:* two calls to ``new_handle(x)`` are guaranteed to +return cdata objects with different ``void *`` values, even with the +same ``x``. This is a useful feature that avoids issues with unexpected +duplicates in the following trick: if you need to keep alive the +"handle" until explicitly asked to free it, but don't have a natural +Python-side place to attach it to, then the easiest is to ``add()`` it +to a global set. It can later be removed from the set by +``global_set.discard(p)``, with ``p`` any cdata object whose ``void *`` +value compares equal. + + +ffi.dlopen(), ffi.dlclose() ++++++++++++++++++++++++++++ + +**ffi.dlopen(libpath, [flags])**: opens and returns a "handle" to a +dynamic library, as a ```` object. See `Preparing and +Distributing modules`_. + +**ffi.dlclose(lib)**: explicitly closes a ```` object returned +by ``ffi.dlopen()``. + +**ffi.RTLD_...**: constants: flags for ``ffi.dlopen()``. + + +ffi.new_allocator() ++++++++++++++++++++ + +**ffi.new_allocator(alloc=None, free=None, should_clear_after_alloc=True)**: +returns a new allocator. An "allocator" is a callable that behaves like +``ffi.new()`` but uses the provided low-level ``alloc`` and ``free`` +functions. *New in version 1.2.* + +``alloc()`` is invoked with the size as sole argument. If it returns +NULL, a MemoryError is raised.
Later, if ``free`` is not None, it will +be called with the result of ``alloc()`` as argument. Both can be either +Python function or directly C functions. If only ``free`` is None, then no +free function is called. If both ``alloc`` and ``free`` are None, the +default alloc/free combination is used. (In other words, the call +``ffi.new(*args)`` is equivalent to ``ffi.new_allocator()(*args)``.) + +If ``should_clear_after_alloc`` is set to False, then the memory +returned by ``alloc()`` is assumed to be already cleared (or you are +fine with garbage); otherwise CFFI will clear it. + + +ffi.init_once() ++++++++++++++++ + +**ffi.init_once(function, tag)**: run ``function()`` once. The +``tag`` should be a primitive object, like a string, that identifies +the function: ``function()`` is only called the first time we see the +``tag``. The return value of ``function()`` is remembered and +returned by the current and all future ``init_once()`` with the same +tag. If ``init_once()`` is called from multiple threads in parallel, +all calls block until the execution of ``function()`` is done. If +``function()`` raises an exception, it is propagated and nothing is +cached (i.e. ``function()`` will be called again, in case we catch the +exception and try ``init_once()`` again). *New in version 1.4.* + +Example:: + + from _xyz_cffi import ffi, lib + + def initlib(): + lib.init_my_library() + + def make_new_foo(): + ffi.init_once(initlib, "init") + return lib.make_foo() + +``init_once()`` is optimized to run very quickly if ``function()`` has +already been called. (On PyPy, the cost is zero---the JIT usually +removes everything in the machine code it produces.) + +*Note:* one motivation__ for ``init_once()`` is the CPython notion of +"subinterpreters" in the embedded case. If you are using the +out-of-line API mode, ``function()`` is called only once even in the +presence of multiple subinterpreters, and its return value is shared +among all subinterpreters. 
The goal is to mimic the way traditional +CPython C extension modules have their init code executed only once in +total even if there are subinterpreters. In the example above, the C +function ``init_my_library()`` is called once in total, not once per +subinterpreter. For this reason, avoid Python-level side-effects in +``function()`` (as they will only be applied in the first +subinterpreter to run); instead, return a value, as in the following +example:: + + def init_get_max(): + return lib.initialize_once_and_get_some_maximum_number() + + def process(i): + if i > ffi.init_once(init_get_max, "max"): + raise IndexError("index too large!") + ... + +.. __: https://bitbucket.org/cffi/cffi/issues/233/ + + +ffi.getctype(), ffi.list_types() +++++++++++++++++++++++++++++++++ + +**ffi.getctype("C type" or , extra="")**: return the string +representation of the given C type. If non-empty, the "extra" string is +appended (or inserted at the right place in more complicated cases); it +can be the name of a variable to declare, or an extra part of the type +like ``"*"`` or ``"[5]"``. For example +``ffi.getctype(ffi.typeof(x), "*")`` returns the string representation +of the C type "pointer to the same type than x"; and +``ffi.getctype("char[80]", "a") == "char a[80]"``. + +**ffi.list_types()**: Returns the user type names known to this FFI +instance. This returns a tuple containing three lists of names: +``(typedef_names, names_of_structs, names_of_unions)``. *New in +version 1.6.* + + +.. _`Preparing and Distributing modules`: cdef.html#loading-libraries + + +Conversions +----------- + +This section documents all the conversions that are allowed when +*writing into* a C data structure (or passing arguments to a function +call), and *reading from* a C data structure (or getting the result of a +function call). The last column gives the type-specific operations +allowed. 
+ ++---------------+------------------------+------------------+----------------+ +| C type | writing into | reading from |other operations| ++===============+========================+==================+================+ +| integers | an integer or anything | a Python int or | int() | +| and enums | on which int() works | long, depending | | +| `(*****)` | (but not a float!). | on the type | | +| | Must be within range. | | | ++---------------+------------------------+------------------+----------------+ +| ``char`` | a string of length 1 | a string of | int() | +| | or another | length 1 | | ++---------------+------------------------+------------------+----------------+ +| ``wchar_t`` | a unicode of length 1 | a unicode of | | +| | (or maybe 2 if | length 1 | int() | +| | surrogates) or | (or maybe 2 if | | +| | another | surrogates) | | ++---------------+------------------------+------------------+----------------+ +| ``float``, | a float or anything on | a Python float | float(), int() | +| ``double`` | which float() works | | | ++---------------+------------------------+------------------+----------------+ +|``long double``| another with | a , to | float(), int() | +| | a ``long double``, or | avoid loosing | | +| | anything on which | precision `(***)`| | +| | float() works | | | ++---------------+------------------------+------------------+----------------+ +| pointers | another with | a |``[]`` `(****)`,| +| | a compatible type (i.e.| |``+``, ``-``, | +| | same type or ``char*`` | |bool() | +| | or ``void*``, or as an | | | +| | array instead) `(*)` | | | ++---------------+------------------------+ | | +| ``void *``, | another with | | | +| ``char *`` | any pointer or array | | | +| | type | | | ++---------------+------------------------+ +----------------+ +| pointers to | same as pointers | | ``[]``, ``+``, | +| structure or | | | ``-``, bool(), | +| union | | | and read/write | +| | | | struct fields | ++---------------+------------------------+ 
+----------------+ +| function | same as pointers | | bool(), | +| pointers | | | call `(**)` | ++---------------+------------------------+------------------+----------------+ +| arrays | a list or tuple of | a |len(), iter(), | +| | items | |``[]`` `(****)`,| +| | | |``+``, ``-`` | ++---------------+------------------------+ +----------------+ +| ``char[]`` | same as arrays, or a | | len(), iter(), | +| | Python string | | ``[]``, ``+``, | +| | | | ``-`` | ++---------------+------------------------+ +----------------+ +| ``wchar_t[]`` | same as arrays, or a | | len(), iter(), | +| | Python unicode | | ``[]``, | +| | | | ``+``, ``-`` | +| | | | | ++---------------+------------------------+------------------+----------------+ +| structure | a list or tuple or | a | read/write | +| | dict of the field | | fields | +| | values, or a same-type | | | +| | | | | ++---------------+------------------------+ +----------------+ +| union | same as struct, but | | read/write | +| | with at most one field | | fields | ++---------------+------------------------+------------------+----------------+ + +`(*)` ``item *`` is ``item[]`` in function arguments: + + In a function declaration, as per the C standard, a ``item *`` + argument is identical to a ``item[]`` argument (and ``ffi.cdef()`` + doesn't record the difference). So when you call such a function, + you can pass an argument that is accepted by either C type, like + for example passing a Python string to a ``char *`` argument + (because it works for ``char[]`` arguments) or a list of integers + to a ``int *`` argument (it works for ``int[]`` arguments). Note + that even if you want to pass a single ``item``, you need to + specify it in a list of length 1; for example, a ``struct point_s + *`` argument might be passed as ``[[x, y]]`` or ``[{'x': 5, 'y': + 10}]``. 
+ + As an optimization, the CPython version of CFFI assumes that a + function with a ``char *`` argument to which you pass a Python + string will not actually modify the array of characters passed in, + and so passes directly a pointer inside the Python string object. + (PyPy might in the future do the same, but it is harder because + strings are not naturally zero-terminated in PyPy.) + +`(**)` C function calls are done with the GIL released. + + Note that we assume that the called functions are *not* using the + Python API from Python.h. For example, we don't check afterwards + if they set a Python exception. You may work around it, but mixing + CFFI with ``Python.h`` is not recommended. (If you do that, on + PyPy and on some platforms like Windows, you may need to explicitly + link to ``libpypy-c.dll`` to access the CPython C API compatibility + layer; indeed, CFFI-generated modules on PyPy don't link to + ``libpypy-c.dll`` on their own. But really, don't do that in the + first place.) + +`(***)` ``long double`` support: + + We keep ``long double`` values inside a cdata object to avoid + losing precision. Normal Python floating-point numbers only + contain enough precision for a ``double``. If you really want to + convert such an object to a regular Python float (i.e. a C + ``double``), call ``float()``. If you need to do arithmetic on + such numbers without any precision loss, you need instead to define + and use a family of C functions like ``long double add(long double + a, long double b);``. + +`(****)` Slicing with ``x[start:stop]``: + + Slicing is allowed, as long as you specify explicitly both ``start`` + and ``stop`` (and don't give any ``step``). It gives a cdata + object that is a "view" of all items from ``start`` to ``stop``. + It is a cdata of type "array" (so e.g. passing it as an argument to a + C function would just convert it to a pointer to the ``start`` item). + As with indexing, negative bounds mean really negative indices, like in + C.
As for slice assignment, it accepts any iterable, including a list + of items or another array-like cdata object, but the length must match. + (Note that this behavior differs from initialization: e.g. you can + say ``chararray[10:15] = "hello"``, but the assigned string must be of + exactly the correct length; no implicit null character is added.) + +`(*****)` Enums are handled like ints: + + Like C, enum types are mostly int types (unsigned or signed, int or + long; note that GCC's first choice is unsigned). Reading an enum + field of a structure, for example, returns you an integer. To + compare their value symbolically, use code like ``if x.field == + lib.FOO``. If you really want to get their value as a string, use + ``ffi.string(ffi.cast("the_enum_type", x.field))``. diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -96,7 +96,9 @@ There is no general equivalent to the ``&`` operator in C (because it would not fit nicely in the model, and it does not seem to be needed -here). But see `ffi.addressof()`_. +here). But see `ffi.addressof()`__. + +.. __: ref.html#ffi-addressof Any operation that would in C return a pointer or array or struct type gives you a fresh cdata object. Unlike the "original" one, these fresh @@ -330,10 +332,12 @@ lib.do_something_with_array([1, 2, 3, 4, 5]) -See `Reference: conversions`_ for a similar way to pass ``struct foo_s +See `Reference: conversions`__ for a similar way to pass ``struct foo_s *`` arguments---but in general, it is clearer to simply pass ``ffi.new('struct foo_s *', initializer)``. +__ ref.html#conversions + CFFI supports passing and returning structs to functions and callbacks. Example: @@ -721,7 +725,9 @@ keep this object alive for as long as the callback may be invoked. The easiest way to do that is to always use ``@ffi.callback()`` at module-level only, and to pass "context" information around with -`ffi.new_handle()`_, if possible. 
Example: +`ffi.new_handle()`__, if possible. Example: + +.. __: ref.html#new-handle .. code-block:: python @@ -849,530 +855,6 @@ FFI Interface ------------- -**ffi.new(cdecl, init=None)**: -allocate an instance according to the specified C type and return a -pointer to it. The specified C type must be either a pointer or an -array: ``new('X *')`` allocates an X and returns a pointer to it, -whereas ``new('X[n]')`` allocates an array of n X'es and returns an -array referencing it (which works mostly like a pointer, like in C). -You can also use ``new('X[]', n)`` to allocate an array of a -non-constant length n. See above__ for other valid initializers. +(The reference for the FFI interface has been moved to the `next page`__.) -.. __: working_ - -When the returned ```` object goes out of scope, the memory is -freed. In other words the returned ```` object has ownership of -the value of type ``cdecl`` that it points to. This means that the raw -data can be used as long as this object is kept alive, but must not be -used for a longer time. Be careful about that when copying the -pointer to the memory somewhere else, e.g. into another structure. - -**ffi.cast("C type", value)**: similar to a C cast: returns an -instance of the named C type initialized with the given value. The -value is casted between integers or pointers of any type. - -**ffi.error**: the Python exception raised in various cases. (Don't -confuse it with ``ffi.errno``.) - -**ffi.errno**: the value of ``errno`` received from the most recent C call -in this thread, and passed to the following C call. (This is a read-write -property.) - -**ffi.getwinerror(code=-1)**: on Windows, in addition to ``errno`` we -also save and restore the ``GetLastError()`` value across function -calls. This function returns this error code as a tuple ``(code, -message)``, adding a readable message like Python does when raising -WindowsError. 
If the argument ``code`` is given, format that code into -a message instead of using ``GetLastError()``. -(Note that it is also possible to declare and call the ``GetLastError()`` -function as usual.) - -**ffi.string(cdata, [maxlen])**: return a Python string (or unicode -string) from the 'cdata'. - -- If 'cdata' is a pointer or array of characters or bytes, returns the - null-terminated string. The returned string extends until the first - null character, or at most 'maxlen' characters. If 'cdata' is an - array then 'maxlen' defaults to its length. See ``ffi.buffer()`` below - for a way to continue past the first null character. *Python 3:* this - returns a ``bytes``, not a ``str``. - -- If 'cdata' is a pointer or array of wchar_t, returns a unicode string - following the same rules. - -- If 'cdata' is a single character or byte or a wchar_t, returns it as a - byte string or unicode string. (Note that in some situation a single - wchar_t may require a Python unicode string of length 2.) - -- If 'cdata' is an enum, returns the value of the enumerator as a string. - If the value is out of range, it is simply returned as the stringified - integer. - - -**ffi.buffer(cdata, [size])**: return a buffer object that references -the raw C data pointed to by the given 'cdata', of 'size' bytes. The -'cdata' must be a pointer or an array. If unspecified, the size of the -buffer is either the size of what ``cdata`` points to, or the whole size -of the array. Getting a buffer is useful because you can read from it -without an extra copy, or write into it to change the original value. 
- -Here are a few examples of where buffer() would be useful: - -- use ``file.write()`` and ``file.readinto()`` with - such a buffer (for files opened in binary mode) - -- use ``ffi.buffer(mystruct[0])[:] = socket.recv(len(buffer))`` to read - into a struct over a socket, rewriting the contents of mystruct[0] - -Remember that like in C, you can use ``array + index`` to get the pointer -to the index'th item of an array. - -The returned object is not a built-in buffer nor memoryview object, -because these objects' API changes too much across Python versions. -Instead it has the following Python API (a subset of Python 2's -``buffer``): - -- ``buf[:]`` or ``bytes(buf)``: fetch a copy as a regular byte string (or - ``buf[start:end]`` for a part) - -- ``buf[:] = newstr``: change the original content (or ``buf[start:end] - = newstr``) - -- ``len(buf), buf[index], buf[index] = newchar``: access as a sequence - of characters. - -The buffer object returned by ``ffi.buffer(cdata)`` keeps alive the -``cdata`` object: if it was originally an owning cdata, then its -owned memory will not be freed as long as the buffer is alive. - -Python 2/3 compatibility note: you should avoid using ``str(buf)``, -because it gives inconsistent results between Python 2 and Python 3. -(This is similar to how ``str()`` gives inconsistent results on regular -byte strings). Use ``buf[:]`` instead. - -**ffi.from_buffer(python_buffer)**: return a ```` that -points to the data of the given Python object, which must support the -buffer interface. This is the opposite of ``ffi.buffer()``. It gives -a reference to the existing data, not a copy; for this -reason, and for PyPy compatibility, it does not work with the built-in -types str or unicode or bytearray (or buffers/memoryviews on them). -It is meant to be used on objects -containing large quantities of raw data, like ``array.array`` or numpy -arrays. It supports both the old buffer API (in Python 2.x) and the -new memoryview API. 
Note that if you pass a read-only buffer object, -you still get a regular ````; it is your responsibility -not to write there if the original buffer doesn't expect you to. -The original object is kept alive (and, in case -of memoryview, locked) as long as the cdata object returned by -``ffi.from_buffer()`` is alive. *New in version 0.9.* - - -.. _memmove: - -**ffi.memmove(dest, src, n)**: copy ``n`` bytes from memory area -``src`` to memory area ``dest``. See examples below. Inspired by the -C functions ``memcpy()`` and ``memmove()``---like the latter, the -areas can overlap. Each of ``dest`` and ``src`` can be either a cdata -pointer or a Python object supporting the buffer/memoryview interface. -In the case of ``dest``, the buffer/memoryview must be writable. -Unlike ``ffi.from_buffer()``, there are no restrictions on the type of -buffer. *New in version 1.3.* Examples: - -* ``ffi.memmove(myptr, b"hello", 5)`` copies the 5 bytes of - ``b"hello"`` to the area that ``myptr`` points to. - -* ``ba = bytearray(100); ffi.memmove(ba, myptr, 100)`` copies 100 - bytes from ``myptr`` into the bytearray ``ba``. - -* ``ffi.memmove(myptr + 1, myptr, 100)`` shifts 100 bytes from - the memory at ``myptr`` to the memory at ``myptr + 1``. - - -**ffi.typeof("C type" or cdata object)**: return an object of type -```` corresponding to the parsed string, or to the C type of the -cdata instance. Usually you don't need to call this function or to -explicitly manipulate ```` objects in your code: any place that -accepts a C type can receive either a string or a pre-parsed ``ctype`` -object (and because of caching of the string, there is no real -performance difference). It can still be useful in writing typechecks, -e.g.: - -.. code-block:: python - - def myfunction(ptr): - assert ffi.typeof(ptr) is ffi.typeof("foo_t*") - ... - -Note also that the mapping from strings like ``"foo_t*"`` to the -```` objects is stored in some internal dictionary. 
This -guarantees that there is only one ```` object, so you -can use the ``is`` operator to compare it. The downside is that the -dictionary entries are immortal for now. In the future, we may add -transparent reclamation of old, unused entries. In the meantime, note -that using strings like ``"int[%d]" % length`` to name a type will -create many immortal cached entries if called with many different -lengths. - -**ffi.CData, ffi.CType**: the Python type of the objects referred to -as ```` and ```` in the rest of this document. Note -that some cdata objects may be actually of a subclass of -``ffi.CData``, and similarly with ctype, so you should check with -``if isinstance(x, ffi.CData)``. Also, ```` objects have -a number of attributes for introspection: ``kind`` and ``cname`` are -always present, and depending on the kind they may also have -``item``, ``length``, ``fields``, ``args``, ``result``, ``ellipsis``, -``abi``, ``elements`` and ``relements``. - -**ffi.NULL**: a constant NULL of type ````. - -**ffi.sizeof("C type" or cdata object)**: return the size of the -argument in bytes. The argument can be either a C type, or a cdata object, -like in the equivalent ``sizeof`` operator in C. - -**ffi.alignof("C type")**: return the natural alignment size in bytes of -the argument. Corresponds to the ``__alignof__`` operator in GCC. - - -**ffi.offsetof("C struct or array type", \*fields_or_indexes)**: return the -offset within the struct of the given field. Corresponds to ``offsetof()`` -in C. - -*New in version 0.9:* -You can give several field names in case of nested structures. You -can also give numeric values which correspond to array items, in case -of a pointer or array type. For example, ``ffi.offsetof("int[5]", 2)`` -is equal to the size of two integers, as is ``ffi.offsetof("int *", 2)``. - - -**ffi.getctype("C type" or , extra="")**: return the string -representation of the given C type. 
If non-empty, the "extra" string is -appended (or inserted at the right place in more complicated cases); it -can be the name of a variable to declare, or an extra part of the type -like ``"*"`` or ``"[5]"``. For example -``ffi.getctype(ffi.typeof(x), "*")`` returns the string representation -of the C type "pointer to the same type than x"; and -``ffi.getctype("char[80]", "a") == "char a[80]"``. - - -**ffi.gc(cdata, destructor)**: return a new cdata object that points to the -same data. Later, when this new cdata object is garbage-collected, -``destructor(old_cdata_object)`` will be called. Example of usage: -``ptr = ffi.gc(lib.malloc(42), lib.free)``. Note that like objects -returned by ``ffi.new()``, the returned pointer objects have *ownership*, -which means the destructor is called as soon as *this* exact returned -object is garbage-collected. - -Note that this should be avoided for large memory allocations or -for limited resources. This is particularly true on PyPy: its GC does -not know how much memory or how many resources the returned ``ptr`` -holds. It will only run its GC when enough memory it knows about has -been allocated (and thus run the destructor possibly later than you -would expect). Moreover, the destructor is called in whatever thread -PyPy is at that moment, which might be a problem for some C libraries. -In these cases, consider writing a wrapper class with custom ``__enter__()`` -and ``__exit__()`` methods, allocating and freeing the C data at known -points in time, and using it in a ``with`` statement. - - -.. _ffi-new_handle: -.. _`ffi.new_handle()`: - -**ffi.new_handle(python_object)**: return a non-NULL cdata of type -``void *`` that contains an opaque reference to ``python_object``. You -can pass it around to C functions or store it into C structures. Later, -you can use **ffi.from_handle(p)** to retrieve the original -``python_object`` from a value with the same ``void *`` pointer. 
-*Calling ffi.from_handle(p) is invalid and will likely crash if -the cdata object returned by new_handle() is not kept alive!* - -(In case you are wondering, this ``void *`` is not the ``PyObject *`` -pointer. This wouldn't make sense on PyPy anyway.) - -The ``ffi.new_handle()/from_handle()`` functions *conceptually* work -like this: - -* ``new_handle()`` returns cdata objects that contains references to - the Python objects; we call them collectively the "handle" cdata - objects. The ``void *`` value in these handle cdata objects are - random but unique. - -* ``from_handle(p)`` searches all live "handle" cdata objects for the - one that has the same value ``p`` as its ``void *`` value. It then - returns the Python object referenced by that handle cdata object. - If none is found, you get "undefined behavior" (i.e. crashes). - -The "handle" cdata object keeps the Python object alive, similar to -how ``ffi.new()`` returns a cdata object that keeps a piece of memory -alive. If the handle cdata object *itself* is not alive any more, -then the association ``void * -> python_object`` is dead and -``from_handle()`` will crash. - -*New in version 1.4:* two calls to ``new_handle(x)`` are guaranteed to -return cdata objects with different ``void *`` values, even with the -same ``x``. This is a useful feature that avoids issues with unexpected -duplicates in the following trick: if you need to keep alive the -"handle" until explicitly asked to free it, but don't have a natural -Python-side place to attach it to, then the easiest is to ``add()`` it -to a global set. It can later be removed from the set by -``global_set.discard(p)``, with ``p`` any cdata object whose ``void *`` -value compares equal. - - -.. _`ffi.addressof()`: - -**ffi.addressof(cdata, \*fields_or_indexes)**: limited equivalent to -the '&' operator in C: - -1. ``ffi.addressof()`` returns a cdata that -is a pointer to this struct or union. 
The returned pointer is only -valid as long as the original ``cdata`` object is; be sure to keep it -alive if it was obtained directly from ``ffi.new()``. - -2. ``ffi.addressof(, field-or-index...)`` returns the address -of a field or array item inside the given structure or array. In case -of nested structures or arrays, you can give more than one field or -index to look recursively. Note that ``ffi.addressof(array, index)`` -can also be expressed as ``array + index``: this is true both in CFFI -and in C, where ``&array[index]`` is just ``array + index``. - -3. ``ffi.addressof(, "name")`` returns the address of the -named function or global variable from the given library object. -*New in version 1.1:* for functions, it returns a regular cdata -object containing a pointer to the function. - -Note that the case 1. cannot be used to take the address of a -primitive or pointer, but only a struct or union. It would be -difficult to implement because only structs and unions are internally -stored as an indirect pointer to the data. If you need a C int whose -address can be taken, use ``ffi.new("int[1]")`` in the first place; -similarly, for a pointer, use ``ffi.new("foo_t *[1]")``. - - -**ffi.dlopen(libpath, [flags])**: opens and returns a "handle" to a -dynamic library, as a ```` object. See `Preparing and -Distributing modules`_. - -**ffi.dlclose(lib)**: explicitly closes a ```` object returned -by ``ffi.dlopen()``. - -**ffi.RLTD_...**: constants: flags for ``ffi.dlopen()``. - - -.. _`alternative allocators`: - -**ffi.new_allocator(alloc=None, free=None, should_clear_after_alloc=True)**: -returns a new allocator. An "allocator" is a callable that behaves like -``ffi.new()`` but uses the provided low-level ``alloc`` and ``free`` -functions. *New in version 1.2.* - -``alloc()`` is invoked with the size as sole argument. If it returns -NULL, a MemoryError is raised. Later, if ``free`` is not None, it will -be called with the result of ``alloc()`` as argument. 
Both can be either -Python function or directly C functions. If only ``free`` is None, then no -free function is called. If both ``alloc`` and ``free`` are None, the -default alloc/free combination is used. (In other words, the call -``ffi.new(*args)`` is equivalent to ``ffi.new_allocator()(*args)``.) - -If ``should_clear_after_alloc`` is set to False, then the memory -returned by ``alloc()`` is assumed to be already cleared (or you are -fine with garbage); otherwise CFFI will clear it. - -.. _initonce: - -**ffi.init_once(function, tag)**: run ``function()`` once. The -``tag`` should be a primitive object, like a string, that identifies -the function: ``function()`` is only called the first time we see the -``tag``. The return value of ``function()`` is remembered and -returned by the current and all future ``init_once()`` with the same -tag. If ``init_once()`` is called from multiple threads in parallel, -all calls block until the execution of ``function()`` is done. If -``function()`` raises an exception, it is propagated and nothing is -cached (i.e. ``function()`` will be called again, in case we catch the -exception and try ``init_once()`` again). *New in version 1.4.* - -Example:: - - from _xyz_cffi import ffi, lib - - def initlib(): - lib.init_my_library() - - def make_new_foo(): - ffi.init_once(initlib, "init") - return lib.make_foo() - -``init_once()`` is optimized to run very quickly if ``function()`` has -already been called. (On PyPy, the cost is zero---the JIT usually -removes everything in the machine code it produces.) - -*Note:* one motivation__ for ``init_once()`` is the CPython notion of -"subinterpreters" in the embedded case. If you are using the -out-of-line API mode, ``function()`` is called only once even in the -presence of multiple subinterpreters, and its return value is shared -among all subinterpreters. 
The goal is to mimic the way traditional -CPython C extension modules have their init code executed only once in -total even if there are subinterpreters. In the example above, the C -function ``init_my_library()`` is called once in total, not once per -subinterpreter. For this reason, avoid Python-level side-effects in -``function()`` (as they will only be applied in the first -subinterpreter to run); instead, return a value, as in the following -example:: - - def init_get_max(): - return lib.initialize_once_and_get_some_maximum_number() - - def process(i): - if i > ffi.init_once(init_get_max, "max"): - raise IndexError("index too large!") - ... - -.. __: https://bitbucket.org/cffi/cffi/issues/233/ - -**ffi.list_types()**: Returns the user type names known to this FFI -instance. This returns a tuple containing three lists of names: -``(typedef_names, names_of_structs, names_of_unions)``. *New in -version 1.6.* - - -.. _`Preparing and Distributing modules`: cdef.html#loading-libraries - - -Reference: conversions ----------------------- - -This section documents all the conversions that are allowed when -*writing into* a C data structure (or passing arguments to a function -call), and *reading from* a C data structure (or getting the result of a -function call). The last column gives the type-specific operations -allowed. - -+---------------+------------------------+------------------+----------------+ -| C type | writing into | reading from |other operations| -+===============+========================+==================+================+ -| integers | an integer or anything | a Python int or | int() | -| and enums | on which int() works | long, depending | | -| `(*****)` | (but not a float!). | on the type | | -| | Must be within range. 
| | | -+---------------+------------------------+------------------+----------------+ -| ``char`` | a string of length 1 | a string of | int() | -| | or another | length 1 | | -+---------------+------------------------+------------------+----------------+ -| ``wchar_t`` | a unicode of length 1 | a unicode of | | -| | (or maybe 2 if | length 1 | int() | -| | surrogates) or | (or maybe 2 if | | -| | another | surrogates) | | -+---------------+------------------------+------------------+----------------+ -| ``float``, | a float or anything on | a Python float | float(), int() | -| ``double`` | which float() works | | | -+---------------+------------------------+------------------+----------------+ -|``long double``| another with | a , to | float(), int() | -| | a ``long double``, or | avoid loosing | | -| | anything on which | precision `(***)`| | -| | float() works | | | -+---------------+------------------------+------------------+----------------+ -| pointers | another with | a |``[]`` `(****)`,| -| | a compatible type (i.e.| |``+``, ``-``, | -| | same type or ``char*`` | |bool() | -| | or ``void*``, or as an | | | -| | array instead) `(*)` | | | -+---------------+------------------------+ | | -| ``void *``, | another with | | | -| ``char *`` | any pointer or array | | | -| | type | | | -+---------------+------------------------+ +----------------+ -| pointers to | same as pointers | | ``[]``, ``+``, | -| structure or | | | ``-``, bool(), | -| union | | | and read/write | -| | | | struct fields | -+---------------+------------------------+ +----------------+ -| function | same as pointers | | bool(), | -| pointers | | | call `(**)` | -+---------------+------------------------+------------------+----------------+ -| arrays | a list or tuple of | a |len(), iter(), | -| | items | |``[]`` `(****)`,| -| | | |``+``, ``-`` | -+---------------+------------------------+ +----------------+ -| ``char[]`` | same as arrays, or a | | len(), iter(), | -| | Python string | | 
``[]``, ``+``, | -| | | | ``-`` | -+---------------+------------------------+ +----------------+ -| ``wchar_t[]`` | same as arrays, or a | | len(), iter(), | -| | Python unicode | | ``[]``, | -| | | | ``+``, ``-`` | -| | | | | -+---------------+------------------------+------------------+----------------+ -| structure | a list or tuple or | a | read/write | -| | dict of the field | | fields | -| | values, or a same-type | | | -| | | | | -+---------------+------------------------+ +----------------+ -| union | same as struct, but | | read/write | -| | with at most one field | | fields | -+---------------+------------------------+------------------+----------------+ - -`(*)` ``item *`` is ``item[]`` in function arguments: - - In a function declaration, as per the C standard, a ``item *`` - argument is identical to a ``item[]`` argument (and ``ffi.cdef()`` - doesn't record the difference). So when you call such a function, - you can pass an argument that is accepted by either C type, like - for example passing a Python string to a ``char *`` argument - (because it works for ``char[]`` arguments) or a list of integers - to a ``int *`` argument (it works for ``int[]`` arguments). Note - that even if you want to pass a single ``item``, you need to - specify it in a list of length 1; for example, a ``struct point_s - *`` argument might be passed as ``[[x, y]]`` or ``[{'x': 5, 'y': - 10}]``. - - As an optimization, the CPython version of CFFI assumes that a - function with a ``char *`` argument to which you pass a Python - string will not actually modify the array of characters passed in, - and so passes directly a pointer inside the Python string object. - (PyPy might in the future do the same, but it is harder because - strings are not naturally zero-terminated in PyPy.) - -`(**)` C function calls are done with the GIL released. - - Note that we assume that the called functions are *not* using the - Python API from Python.h. 
For example, we don't check afterwards - if they set a Python exception. You may work around it, but mixing - CFFI with ``Python.h`` is not recommended. (If you do that, on - PyPy and on some platforms like Windows, you may need to explicitly - link to ``libpypy-c.dll`` to access the CPython C API compatibility - layer; indeed, CFFI-generated modules on PyPy don't link to - ``libpypy-c.dll`` on their own. But really, don't do that in the - first place.) - -`(***)` ``long double`` support: - - We keep ``long double`` values inside a cdata object to avoid - loosing precision. Normal Python floating-point numbers only - contain enough precision for a ``double``. If you really want to - convert such an object to a regular Python float (i.e. a C - ``double``), call ``float()``. If you need to do arithmetic on - such numbers without any precision loss, you need instead to define - and use a family of C functions like ``long double add(long double - a, long double b);``. - -`(****)` Slicing with ``x[start:stop]``: - - Slicing is allowed, as long as you specify explicitly both ``start`` - and ``stop`` (and don't give any ``step``). It gives a cdata - object that is a "view" of all items from ``start`` to ``stop``. - It is a cdata of type "array" (so e.g. passing it as an argument to a - C function would just convert it to a pointer to the ``start`` item). - As with indexing, negative bounds mean really negative indices, like in - C. As for slice assignment, it accepts any iterable, including a list - of items or another array-like cdata object, but the length must match. - (Note that this behavior differs from initialization: e.g. you can - say ``chararray[10:15] = "hello"``, but the assigned string must be of - exactly the correct length; no implicit null character is added.) - -`(*****)` Enums are handled like ints: - - Like C, enum types are mostly int types (unsigned or signed, int or - long; note that GCC's first choice is unsigned). 
Reading an enum - field of a structure, for example, returns you an integer. To - compare their value symbolically, use code like ``if x.field == - lib.FOO``. If you really want to get their value as a string, use - ``ffi.string(ffi.cast("the_enum_type", x.field))``. +.. __: ref.html diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -18,6 +18,9 @@ haven't yet figured out the hacks needed to convince ``pydoc`` to show more. (You can use ``dir(lib)`` but it is not most helpful.) +* Yet another attempt at robustness against CPython's interpreter + shutdown logic + v1.5.2 ====== @@ -98,7 +101,7 @@ the docs`__ of ``ffi.new_handle()`` has been here since v0.8!) .. __: using.html#extern-python -.. __: using.html#initonce +.. __: ref.html#ffi-initonce .. __: using.html#ffi-new-handle @@ -147,7 +150,7 @@ which had unwanted side-effects. Try saying ``import setuptools`` first, which patches distutils... -.. _`ffi.memmove()`: using.html#memmove +.. _`ffi.memmove()`: ref.html#ffi-memmove .. __: https://bugs.python.org/issue23246 .. __: https://bitbucket.org/cffi/cffi/pull-requests/65/remove-_hack_at_distutils-which-imports/diff .. _`calling convention`: using.html#windows-calling-conventions @@ -209,7 +212,7 @@ support for `alternative allocators`__. .. __: using.html#callbacks -.. __: using.html#alternative-allocators +.. 
__: ref.html#new-allocator v1.1.2 From pypy.commits at gmail.com Thu Apr 21 05:45:25 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 21 Apr 2016 02:45:25 -0700 (PDT) Subject: [pypy-commit] cffi default: ffi.unpack(), extern "Python+C" Message-ID: <5718a135.6a57c20a.ce5de.ffff9a4b@mx.google.com> Author: Armin Rigo Branch: Changeset: r2672:e127961f57ec Date: 2016-04-21 11:45 +0200 http://bitbucket.org/cffi/cffi/changeset/e127961f57ec/ Log: ffi.unpack(), extern "Python+C" diff --git a/doc/source/ref.rst b/doc/source/ref.rst --- a/doc/source/ref.rst +++ b/doc/source/ref.rst @@ -53,6 +53,9 @@ value is casted between integers or pointers of any type. +.. _ffi-errno: +.. _ffi-getwinerror: + ffi.errno, ffi.getwinerror() ++++++++++++++++++++++++++++ @@ -70,6 +73,9 @@ function as usual.) +.. _ffi-string: +.. _ffi-unpack: + ffi.string(), ffi.unpack() ++++++++++++++++++++++++++ @@ -78,8 +84,9 @@ - If 'cdata' is a pointer or array of characters or bytes, returns the null-terminated string. The returned string extends until the first - null character, or at most 'maxlen' characters. If 'cdata' is an - array then 'maxlen' defaults to its length. See ``ffi.buffer()`` below + null character. The 'maxlen' argument limits how far we look for a + null character. If 'cdata' is an + array then 'maxlen' defaults to its length. See ``ffi.unpack()`` below for a way to continue past the first null character. *Python 3:* this returns a ``bytes``, not a ``str``. @@ -94,8 +101,26 @@ If the value is out of range, it is simply returned as the stringified integer. -**ffi.unpack(...)**: XXXXXXXXXXX +**ffi.unpack(cdata, length)**: unpacks an array of C data of the given +length, returning a Python string/unicode/list. The 'cdata' should be +a pointer; if it is an array it is first converted to the pointer +type. *New in version 1.6.* +- If 'cdata' is a pointer to 'char', returns a byte string. It does + not stop at the first null. 
(An equivalent way to do that is + ``ffi.buffer(cdata, length)[:]``.) + +- If 'cdata' is a pointer to 'wchar_t', returns a unicode string. + ('length' is measured in number of wchar_t; it is not the size in + bytes.) + +- If 'cdata' is a pointer to anything else, returns a list, of the + given 'length'. (A slower way to do that is ``[cdata[i] for i in + range(length)]``.) + + +.. _ffi-buffer: +.. _ffi-from-buffer: ffi.buffer(), ffi.from_buffer() +++++++++++++++++++++++++++++++ @@ -180,6 +205,10 @@ the memory at ``myptr`` to the memory at ``myptr + 1``. +.. _ffi-typeof: +.. _ffi-sizeof: +.. _ffi-alignof: + ffi.typeof(), ffi.sizeof(), ffi.alignof() +++++++++++++++++++++++++++++++++++++++++ @@ -216,6 +245,7 @@ the argument. Corresponds to the ``__alignof__`` operator in GCC. +.. _ffi-offsetof: .. _ffi-addressof: ffi.offsetof(), ffi.addressof() @@ -260,6 +290,9 @@ similarly, for a pointer, use ``ffi.new("foo_t *[1]")``. +.. _ffi-cdata: +.. _ffi-ctype: + ffi.CData, ffi.CType ++++++++++++++++++++ @@ -297,6 +330,9 @@ points in time, and using it in a ``with`` statement. +.. _ffi-new-handle: +.. _ffi-from-handle: + ffi.new_handle(), ffi.from_handle() +++++++++++++++++++++++++++++++++++ @@ -341,6 +377,9 @@ value compares equal. +.. _ffi-dlopen: +.. _ffi-dlclose: + ffi.dlopen(), ffi.dlclose() +++++++++++++++++++++++++++ @@ -428,6 +467,9 @@ .. __: https://bitbucket.org/cffi/cffi/issues/233/ +.. _ffi-getctype: +.. _ffi-list-types: + ffi.getctype(), ffi.list_types() ++++++++++++++++++++++++++++++++ diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -577,12 +577,15 @@ In case you want to access some ``extern "Python"`` function directly from the C code written in ``set_source()``, you need to write a -forward static declaration. The real implementation of this function +forward declaration. (By default it needs to be static, but see +`next paragraph`__.) 
The real implementation of this function is added by CFFI *after* the C code---this is needed because the declaration might use types defined by ``set_source()`` (e.g. ``event_t`` above, from the ``#include``), so it cannot be generated before. +.. __: `extern-python-c`_ + :: ffi.set_source("_demo_cffi", """ @@ -612,6 +615,46 @@ } """) +.. _extern-python-c: + +Extern "Python+C" +~~~~~~~~~~~~~~~~~ + +Functions declared with ``extern "Python"`` are generated as +``static`` functions in the C source. However, in some cases it is +convenient to make them non-static, typically when you want to make +them directly callable from other C source files. To do that, you can +say ``extern "Python+C"`` instead of just ``extern "Python"``. *New +in version 1.6.* + ++------------------------------------+--------------------------------------+ +| if the cdef contains | then CFFI generates | ++------------------------------------+--------------------------------------+ +| ``extern "Python" int f(int);`` | ``static int f(int) { /* code */ }`` | ++------------------------------------+--------------------------------------+ +| ``extern "Python+C" int f(int);`` | ``int f(int) { /* code */ }`` | ++------------------------------------+--------------------------------------+ + +The name ``extern "Python+C"`` comes from the fact that we want an +extern function in both senses: as an ``extern "Python"``, and as a +C function that is not static. + +You cannot make CFFI generate additional macros or other +compiler-specific stuff like the GCC ``__attribute__``. You can only +control whether the function should be ``static`` or not. 
But often, +these attributes must be written alongside the function *header*, and +it is fine if the function *implementation* does not repeat them:: + + ffi.cdef(""" + extern "Python+C" int f(int); /* not static */ + """) + ffi.set_source("_example_cffi", """ + /* the forward declaration, setting a gcc attribute + (this line could also be in some .h file, to be included + both here and in the other C files of the project) */ + int f(int) __attribute__((visibility("hidden"))); + """) + Extern "Python": reference ~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -6,11 +6,11 @@ v1.6 ==== -* ffi.list_types() +* `ffi.list_types()`_ -* ffi.unpack() +* `ffi.unpack()`_ -* extern "Python+C" +* `extern "Python+C"`_ * in API mode, ``lib.foo.__doc__`` contains the C signature now. On CPython you can say ``help(lib.foo)``, but for some reason @@ -18,8 +18,12 @@ haven't yet figured out the hacks needed to convince ``pydoc`` to show more. (You can use ``dir(lib)`` but it is not most helpful.) -* Yet another attempt at robustness against CPython's interpreter - shutdown logic +* Yet another attempt at robustness of ``ffi.def_extern()`` against + CPython's interpreter shutdown logic. + +.. _`ffi.list_types()`: ref.html#ffi-list-types +.. _`ffi.unpack()`: ref.html#ffi-unpack +.. _`extern "Python+C"`: using.html#extern-python-c v1.5.2 @@ -101,8 +105,8 @@ the docs`__ of ``ffi.new_handle()`` has been here since v0.8!) .. __: using.html#extern-python -.. __: ref.html#ffi-initonce -.. __: using.html#ffi-new-handle +.. __: ref.html#ffi-init-once +.. __: ref.html#ffi-new-handle v1.3.1 @@ -212,7 +216,7 @@ support for `alternative allocators`__. .. __: using.html#callbacks -.. __: ref.html#new-allocator +.. 
__: ref.html#ffi-new-allocator v1.1.2 From pypy.commits at gmail.com Thu Apr 21 05:52:45 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 21 Apr 2016 02:52:45 -0700 (PDT) Subject: [pypy-commit] cffi release-1.6: make release branch Message-ID: <5718a2ed.46291c0a.dbffa.ffffd02e@mx.google.com> Author: Armin Rigo Branch: release-1.6 Changeset: r2673:f33bcb134f3c Date: 2016-04-21 11:53 +0200 http://bitbucket.org/cffi/cffi/changeset/f33bcb134f3c/ Log: make release branch From pypy.commits at gmail.com Thu Apr 21 06:29:42 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 21 Apr 2016 03:29:42 -0700 (PDT) Subject: [pypy-commit] cffi default: Windows fix: 'struct a' is a great name for being predefined and not reusable Message-ID: <5718ab96.47afc20a.2f58b.ffffae28@mx.google.com> Author: Armin Rigo Branch: Changeset: r2675:fce0884a0ff6 Date: 2016-04-21 12:29 +0200 http://bitbucket.org/cffi/cffi/changeset/fce0884a0ff6/ Log: Windows fix: 'struct a' is a great name for being predefined and not reusable diff --git a/testing/cffi0/test_ffi_backend.py b/testing/cffi0/test_ffi_backend.py --- a/testing/cffi0/test_ffi_backend.py +++ b/testing/cffi0/test_ffi_backend.py @@ -466,12 +466,12 @@ def test_introspect_order(self): ffi = FFI() - ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;") - ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;") - ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;") - assert ffi.list_types() == (['b', 'bb', 'bbb'], - ['a', 'cc', 'ccc'], - ['aa', 'aaa', 'g']) + ffi.cdef("union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;") + ffi.cdef("union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;") + ffi.cdef("union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;") + assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'], + ['CFFIa', 'CFFIcc', 'CFFIccc'], + ['CFFIaa', 'CFFIaaa', 'CFFIg']) def test_unpack(self): ffi = FFI() diff --git a/testing/cffi1/test_recompiler.py 
b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1897,14 +1897,14 @@ def test_introspect_order(): ffi = FFI() - ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;") - ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;") - ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;") + ffi.cdef("union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;") + ffi.cdef("union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;") + ffi.cdef("union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;") verify(ffi, "test_introspect_order", """ - union aaa { int a; }; typedef struct ccc { int a; } b; - union g { int a; }; typedef struct cc { int a; } bbb; - union aa { int a; }; typedef struct a { int a; } bb; + union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb; + union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb; + union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb; """) - assert ffi.list_types() == (['b', 'bb', 'bbb'], - ['a', 'cc', 'ccc'], - ['aa', 'aaa', 'g']) + assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'], + ['CFFIa', 'CFFIcc', 'CFFIccc'], + ['CFFIaa', 'CFFIaaa', 'CFFIg']) From pypy.commits at gmail.com Thu Apr 21 06:29:40 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 21 Apr 2016 03:29:40 -0700 (PDT) Subject: [pypy-commit] cffi default: kill unused var Message-ID: <5718ab94.de361c0a.eed66.ffffcc6a@mx.google.com> Author: Armin Rigo Branch: Changeset: r2674:d14561075d69 Date: 2016-04-21 12:20 +0200 http://bitbucket.org/cffi/cffi/changeset/d14561075d69/ Log: kill unused var diff --git a/c/realize_c_type.c b/c/realize_c_type.c --- a/c/realize_c_type.c +++ b/c/realize_c_type.c @@ -650,7 +650,6 @@ _cffi_opcode_t opcodes[], int index) { PyObject *x; - CTypeDescrObject *ct; _cffi_opcode_t op = opcodes[index]; if ((((uintptr_t)op) & 1) == 0) { From pypy.commits at gmail.com Thu Apr 21 07:17:17 
2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 21 Apr 2016 04:17:17 -0700 (PDT) Subject: [pypy-commit] cffi default: Re-add this hack, removed in 51f1337c9b4c; but this time only in tests Message-ID: <5718b6bd.d3981c0a.46835.ffffd40d@mx.google.com> Author: Armin Rigo Branch: Changeset: r2676:ca33521c4112 Date: 2016-04-21 13:14 +0200 http://bitbucket.org/cffi/cffi/changeset/ca33521c4112/ Log: Re-add this hack, removed in 51f1337c9b4c; but this time only in tests diff --git a/testing/cffi1/test_zdist.py b/testing/cffi1/test_zdist.py --- a/testing/cffi1/test_zdist.py +++ b/testing/cffi1/test_zdist.py @@ -279,6 +279,14 @@ pass with open("setup.py", "w") as f: f.write("""if 1: + # https://bugs.python.org/issue23246 + import sys + if sys.platform == 'win32': + try: + import setuptools + except ImportError: + pass + import cffi ffi = cffi.FFI() ffi.set_source("pack1.mymod", "/*code would be here*/") diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -79,8 +79,21 @@ # find a solution to that: we could hack sys.path inside the # script run here, but we can't hack it in the same way in # execute(). 
- output = self._run([sys.executable, - os.path.join(local_dir, filename)]) + pathname = os.path.join(path, filename) + with open(pathname, 'w') as g: + g.write(''' +# https://bugs.python.org/issue23246 +import sys +if sys.platform == 'win32': + try: + import setuptools + except ImportError: + pass +''') + with open(os.path.join(local_dir, filename), 'r') as f: + g.write(f.read()) + + output = self._run([sys.executable, pathname]) match = re.compile(r"\bFILENAME: (.+)").search(output) assert match dynamic_lib_name = match.group(1) diff --git a/testing/udir.py b/testing/udir.py --- a/testing/udir.py +++ b/testing/udir.py @@ -1,3 +1,13 @@ import py +import sys udir = py.path.local.make_numbered_dir(prefix = 'ffi-') + + +# Windows-only workaround for some configurations: see +# https://bugs.python.org/issue23246 (Python 2.7.9) +if sys.platform == 'win32': + try: + import setuptools + except ImportError: + pass From pypy.commits at gmail.com Thu Apr 21 07:17:19 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 21 Apr 2016 04:17:19 -0700 (PDT) Subject: [pypy-commit] cffi release-1.6: hg merge default Message-ID: <5718b6bf.2b30c20a.c591d.ffffc053@mx.google.com> Author: Armin Rigo Branch: release-1.6 Changeset: r2677:3605dede031e Date: 2016-04-21 13:17 +0200 http://bitbucket.org/cffi/cffi/changeset/3605dede031e/ Log: hg merge default diff --git a/c/realize_c_type.c b/c/realize_c_type.c --- a/c/realize_c_type.c +++ b/c/realize_c_type.c @@ -650,7 +650,6 @@ _cffi_opcode_t opcodes[], int index) { PyObject *x; - CTypeDescrObject *ct; _cffi_opcode_t op = opcodes[index]; if ((((uintptr_t)op) & 1) == 0) { diff --git a/testing/cffi0/test_ffi_backend.py b/testing/cffi0/test_ffi_backend.py --- a/testing/cffi0/test_ffi_backend.py +++ b/testing/cffi0/test_ffi_backend.py @@ -466,12 +466,12 @@ def test_introspect_order(self): ffi = FFI() - ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;") - ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;") - 
ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;") - assert ffi.list_types() == (['b', 'bb', 'bbb'], - ['a', 'cc', 'ccc'], - ['aa', 'aaa', 'g']) + ffi.cdef("union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;") + ffi.cdef("union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;") + ffi.cdef("union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;") + assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'], + ['CFFIa', 'CFFIcc', 'CFFIccc'], + ['CFFIaa', 'CFFIaaa', 'CFFIg']) def test_unpack(self): ffi = FFI() diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1897,14 +1897,14 @@ def test_introspect_order(): ffi = FFI() - ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;") - ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;") - ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;") + ffi.cdef("union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;") + ffi.cdef("union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb;") + ffi.cdef("union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;") verify(ffi, "test_introspect_order", """ - union aaa { int a; }; typedef struct ccc { int a; } b; - union g { int a; }; typedef struct cc { int a; } bbb; - union aa { int a; }; typedef struct a { int a; } bb; + union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb; + union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb; + union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb; """) - assert ffi.list_types() == (['b', 'bb', 'bbb'], - ['a', 'cc', 'ccc'], - ['aa', 'aaa', 'g']) + assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'], + ['CFFIa', 'CFFIcc', 'CFFIccc'], + ['CFFIaa', 'CFFIaaa', 'CFFIg']) diff --git a/testing/cffi1/test_zdist.py b/testing/cffi1/test_zdist.py --- a/testing/cffi1/test_zdist.py +++ 
b/testing/cffi1/test_zdist.py @@ -279,6 +279,14 @@ pass with open("setup.py", "w") as f: f.write("""if 1: + # https://bugs.python.org/issue23246 + import sys + if sys.platform == 'win32': + try: + import setuptools + except ImportError: + pass + import cffi ffi = cffi.FFI() ffi.set_source("pack1.mymod", "/*code would be here*/") diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -79,8 +79,21 @@ # find a solution to that: we could hack sys.path inside the # script run here, but we can't hack it in the same way in # execute(). - output = self._run([sys.executable, - os.path.join(local_dir, filename)]) + pathname = os.path.join(path, filename) + with open(pathname, 'w') as g: + g.write(''' +# https://bugs.python.org/issue23246 +import sys +if sys.platform == 'win32': + try: + import setuptools + except ImportError: + pass +''') + with open(os.path.join(local_dir, filename), 'r') as f: + g.write(f.read()) + + output = self._run([sys.executable, pathname]) match = re.compile(r"\bFILENAME: (.+)").search(output) assert match dynamic_lib_name = match.group(1) diff --git a/testing/udir.py b/testing/udir.py --- a/testing/udir.py +++ b/testing/udir.py @@ -1,3 +1,13 @@ import py +import sys udir = py.path.local.make_numbered_dir(prefix = 'ffi-') + + +# Windows-only workaround for some configurations: see +# https://bugs.python.org/issue23246 (Python 2.7.9) +if sys.platform == 'win32': + try: + import setuptools + except ImportError: + pass From pypy.commits at gmail.com Thu Apr 21 07:19:07 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 21 Apr 2016 04:19:07 -0700 (PDT) Subject: [pypy-commit] cffi default: hg merge release-1.6 Message-ID: <5718b72b.01e61c0a.80402.fffff273@mx.google.com> Author: Armin Rigo Branch: Changeset: r2679:a61429d5ad2d Date: 2016-04-21 13:18 +0200 http://bitbucket.org/cffi/cffi/changeset/a61429d5ad2d/ Log: hg merge release-1.6 diff --git 
a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,9 +53,9 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.6.0.tar.gz - - MD5: ... + - MD5: 2fae9160991afefb20ff0fbde3b14faf - - SHA: ... + - SHA: 3161ff5d1e791e86f95de258c93371dff9941c4d * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From pypy.commits at gmail.com Thu Apr 21 07:19:05 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 21 Apr 2016 04:19:05 -0700 (PDT) Subject: [pypy-commit] cffi release-1.6: md5/sha1 Message-ID: <5718b729.939d1c0a.8fd6b.ffffe149@mx.google.com> Author: Armin Rigo Branch: release-1.6 Changeset: r2678:baee92bca72a Date: 2016-04-21 13:18 +0200 http://bitbucket.org/cffi/cffi/changeset/baee92bca72a/ Log: md5/sha1 diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,9 +53,9 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.6.0.tar.gz - - MD5: ... + - MD5: 2fae9160991afefb20ff0fbde3b14faf - - SHA: ... 
+ - SHA: 3161ff5d1e791e86f95de258c93371dff9941c4d * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From pypy.commits at gmail.com Thu Apr 21 10:57:52 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Apr 2016 07:57:52 -0700 (PDT) Subject: [pypy-commit] pypy default: don't suggest the hybrid GC Message-ID: <5718ea70.81f0c20a.83631.19aa@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r83804:2a6ba5aae8d8 Date: 2016-04-21 17:54 +0300 http://bitbucket.org/pypy/pypy/changeset/2a6ba5aae8d8/ Log: don't suggest the hybrid GC diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -399,7 +399,7 @@ try: configure_boehm(self.translator.platform) except CompilationError, e: - i = 'Boehm GC not installed. Try e.g. "translate.py --gc=hybrid"' + i = 'Boehm GC not installed. Try e.g. "translate.py --gc=minimark"' raise Exception(str(e) + '\n' + i) @taskdef([STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE, '?annotate'], From pypy.commits at gmail.com Thu Apr 21 11:23:07 2016 From: pypy.commits at gmail.com (mjacob) Date: Thu, 21 Apr 2016 08:23:07 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Raise NotImplementedError only when /dev/urandom is not present. Fixes app-level test. Message-ID: <5718f05b.aa5ec20a.16ccd.24d1@mx.google.com> Author: Manuel Jacob Branch: py3k Changeset: r83805:b2a4ce692ef4 Date: 2016-04-21 17:24 +0200 http://bitbucket.org/pypy/pypy/changeset/b2a4ce692ef4/ Log: Raise NotImplementedError only when /dev/urandom is not present. Fixes app-level test. diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -3,6 +3,7 @@ from __pypy__ import validate_fd # XXX we need a way to access the current module's globals more directly... 
+import errno import sys if 'posix' in sys.builtin_module_names: import posix @@ -135,5 +136,7 @@ try: with open('/dev/urandom', 'rb', buffering=0) as fd: return fd.read(n) - except (OSError, IOError): - raise NotImplementedError("/dev/urandom (or equivalent) not found") + except OSError as e: + if e.errno in (errno.ENOENT, errno.ENXIO, errno.ENODEV, errno.EACCES): + raise NotImplementedError("/dev/urandom (or equivalent) not found") + raise From pypy.commits at gmail.com Thu Apr 21 12:35:06 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 21 Apr 2016 09:35:06 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: typo Message-ID: <5719013a.89cbc20a.86581.3f39@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83806:1ea1f550649e Date: 2016-04-21 19:34 +0300 http://bitbucket.org/pypy/pypy/changeset/1ea1f550649e/ Log: typo diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1220,7 +1220,7 @@ # Generate definitions for global structures structs = ["#include "] if use_micronumpy: - structs.append('#include /* api.py line 1223 */) + structs.append('#include /* api.py line 1223 */') for name, (typ, expr) in GLOBALS.iteritems(): if '#' in name: structs.append('%s %s;' % (typ[:-1], name.split('#')[0])) From pypy.commits at gmail.com Thu Apr 21 12:36:03 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Apr 2016 09:36:03 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: remove the withprebuiltchar option and make it part of sharesmallstr Message-ID: <57190173.4849c20a.8b85f.42d5@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83808:a14e75914940 Date: 2016-04-21 19:08 +0300 http://bitbucket.org/pypy/pypy/changeset/a14e75914940/ Log: remove the withprebuiltchar option and make it part of sharesmallstr (withprebuiltchar was always turned on with O2 and Ojit, but since sharesmallstr is off, it had no effect) diff --git 
a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -204,10 +204,6 @@ BoolOption("withstrbuf", "use strings optimized for addition (ver 2)", default=False), - BoolOption("withprebuiltchar", - "use prebuilt single-character string objects", - default=False), - BoolOption("sharesmallstr", "always reuse the prebuilt string objects " "(the empty string and potentially single-char strings)", @@ -292,7 +288,6 @@ # all the good optimizations for PyPy should be listed here if level in ['2', '3', 'jit']: config.objspace.std.suggest(withmethodcache=True) - config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(intshortcut=True) config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) @@ -312,7 +307,7 @@ if level == 'mem': config.objspace.std.suggest(withprebuiltint=True) config.objspace.std.suggest(withliststrategies=True) - config.objspace.std.suggest(withprebuiltchar=True) + config.objspace.std.suggest(sharesmallstr=True) config.objspace.std.suggest(withmapdict=True) if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt deleted file mode 100644 diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -847,23 +847,18 @@ def wrapstr(space, s): if space.config.objspace.std.sharesmallstr: - if space.config.objspace.std.withprebuiltchar: - # share characters and empty string - if len(s) <= 1: - if len(s) == 0: - return W_BytesObject.EMPTY - else: - s = s[0] # annotator hint: a single char - return wrapchar(space, s) - else: - # only share the empty string + # share characters and empty string + if len(s) <= 1: if len(s) == 0: return W_BytesObject.EMPTY + else: + s = s[0] # annotator hint: a single char 
+ return wrapchar(space, s) return W_BytesObject(s) def wrapchar(space, c): - if space.config.objspace.std.withprebuiltchar and not we_are_jitted(): + if space.config.objspace.std.sharesmallstr and not we_are_jitted(): return W_BytesObject.PREBUILT[ord(c)] else: return W_BytesObject(c) diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -796,12 +796,5 @@ x = Foo() assert "hello" + x == 42 -class AppTestPrebuilt(AppTestBytesObject): - spaceconfig = {"objspace.std.withprebuiltchar": True} - class AppTestShare(AppTestBytesObject): spaceconfig = {"objspace.std.sharesmallstr": True} - -class AppTestPrebuiltShare(AppTestBytesObject): - spaceconfig = {"objspace.std.withprebuiltchar": True, - "objspace.std.sharesmallstr": True} From pypy.commits at gmail.com Thu Apr 21 12:36:01 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Apr 2016 09:36:01 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: remove objspace.std.withrangelist option, just use objspace.std.withliststrategies Message-ID: <57190171.2171c20a.1e95f.44ab@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83807:f309bca27c4f Date: 2016-04-21 18:16 +0300 http://bitbucket.org/pypy/pypy/changeset/f309bca27c4f/ Log: remove objspace.std.withrangelist option, just use objspace.std.withliststrategies diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -229,11 +229,6 @@ ("objspace.std.withtypeversion", True), ]), - BoolOption("withrangelist", - "enable special range list implementation that does not " - "actually create the full list until the resulting " - "list is mutated", - default=False), BoolOption("withliststrategies", "enable optimized ways to store lists of primitives ", default=True), @@ -296,7 +291,6 @@ """ # all the good 
optimizations for PyPy should be listed here if level in ['2', '3', 'jit']: - config.objspace.std.suggest(withrangelist=True) config.objspace.std.suggest(withmethodcache=True) config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(intshortcut=True) @@ -317,7 +311,7 @@ # memory-saving optimizations if level == 'mem': config.objspace.std.suggest(withprebuiltint=True) - config.objspace.std.suggest(withrangelist=True) + config.objspace.std.suggest(withliststrategies=True) config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(withmapdict=True) if not IS_64_BITS: diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withrangelist.txt +++ /dev/null @@ -1,11 +0,0 @@ -Enable "range list" objects. They are an additional implementation of the Python -``list`` type, indistinguishable for the normal user. Whenever the ``range`` -builtin is called, an range list is returned. As long as this list is not -mutated (and for example only iterated over), it uses only enough memory to -store the start, stop and step of the range. This makes using ``range`` as -efficient as ``xrange``, as long as the result is only used in a ``for``-loop. - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. 
_`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists - diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -87,7 +87,7 @@ howmany = get_len_of_range(space, start, stop, step) - if space.config.objspace.std.withrangelist: + if space.config.objspace.std.withliststrategies: return range_withspecialized_implementation(space, start, step, howmany) res_w = [None] * howmany @@ -99,7 +99,7 @@ def range_withspecialized_implementation(space, start, step, length): - assert space.config.objspace.std.withrangelist + assert space.config.objspace.std.withliststrategies from pypy.objspace.std.listobject import make_range_list return make_range_list(space, start, step, length) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1590,20 +1590,13 @@ assert L3.index(-0.0, i) == i -class AppTestListObjectWithRangeList(AppTestListObject): - """Run the list object tests with range lists enabled. Tests should go in - AppTestListObject so they can be run -A against CPython as well. - """ - spaceconfig = {"objspace.std.withrangelist": True} - - class AppTestRangeListForcing: """Tests for range lists that test forcing. Regular tests should go in AppTestListObject so they can be run -A against CPython as well. Separate from AppTestListObjectWithRangeList so we don't silently overwrite tests with the same names. 
""" - spaceconfig = {"objspace.std.withrangelist": True} + spaceconfig = {"objspace.std.withliststrategies": True} def setup_class(cls): if cls.runappdirect: From pypy.commits at gmail.com Thu Apr 21 12:36:04 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Apr 2016 09:36:04 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: if we have no rweakref, make classes uncollectable as opposed to not supporting Message-ID: <57190174.04c31c0a.8e698.6260@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83809:ab0e0f0d6bb1 Date: 2016-04-21 19:34 +0300 http://bitbucket.org/pypy/pypy/changeset/ab0e0f0d6bb1/ Log: if we have no rweakref, make classes uncollectable as opposed to not supporting get_subclasses (this is fine since translating without rweakref is anyway only for, say, testing new GCs) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -232,15 +232,12 @@ BoolOption("withtypeversion", "version type objects when changing them", cmdline=None, - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), + default=False), BoolOption("withmethodcache", "try to cache method lookups", default=False, - requires=[("objspace.std.withtypeversion", True), - ("translation.rweakref", True)]), + requires=[("objspace.std.withtypeversion", True)]), BoolOption("withmethodcachecounter", "try to cache methods and provide a counter in __pypy__. 
" "for testing purposes only.", @@ -258,14 +255,10 @@ default=False), BoolOption("getattributeshortcut", "track types that override __getattribute__", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), + default=False), BoolOption("newshortcut", "cache and shortcut calling __new__ from builtin types", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), + default=False), BoolOption("withidentitydict", "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not", diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -557,7 +557,8 @@ def add_subclass(w_self, w_subclass): space = w_self.space if not space.config.translation.rweakref: - return # no weakref support, don't keep track of subclasses + self.weak_subclasses.append(w_subclass) # not really weak, but well + return import weakref assert isinstance(w_subclass, W_TypeObject) newref = weakref.ref(w_subclass) @@ -572,7 +573,12 @@ def remove_subclass(w_self, w_subclass): space = w_self.space if not space.config.translation.rweakref: - return # no weakref support, don't keep track of subclasses + for i in range(len(w_self.weak_subclasses)): + w_cls = w_self.weak_subclasses[i] + if w_cls is w_subclass: + del w_self.weak_subclasses[i] + return + return for i in range(len(w_self.weak_subclasses)): ref = w_self.weak_subclasses[i] if ref() is w_subclass: @@ -582,9 +588,7 @@ def get_subclasses(w_self): space = w_self.space if not space.config.translation.rweakref: - raise oefmt(space.w_RuntimeError, - "this feature requires weakrefs, " - "which are not available in this build of PyPy") + return self.weak_subclasses[:] subclasses_w = [] for ref in w_self.weak_subclasses: w_ob = ref() From pypy.commits at gmail.com Thu Apr 21 15:58:35 2016 From: 
pypy.commits at gmail.com (mattip) Date: Thu, 21 Apr 2016 12:58:35 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: fix test, PyThreadState_Get() cannot be called after PyEval_SaveThread since the state is set to NULL Message-ID: <571930eb.52ad1c0a.ca617.ffffbb6a@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83810:80bae955e12e Date: 2016-04-21 22:57 +0300 http://bitbucket.org/pypy/pypy/changeset/80bae955e12e/ Log: fix test, PyThreadState_Get() cannot be called after PyEval_SaveThread since the state is set to NULL diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -118,19 +118,20 @@ module = self.import_extension('foo', [ ("bounce", "METH_NOARGS", """ + if (PyEval_ThreadsInitialized() == 0) + { + PyEval_InitThreads(); + } + PyGILState_Ensure(); PyThreadState *tstate = PyEval_SaveThread(); if (tstate == NULL) { return PyLong_FromLong(0); } - if (PyThreadState_Get() != NULL) { - return PyLong_FromLong(1); - } - PyEval_RestoreThread(tstate); if (PyThreadState_Get() != tstate) { - return PyLong_FromLong(2); + return PyLong_FromLong(1); } return PyLong_FromLong(3); From pypy.commits at gmail.com Thu Apr 21 23:09:04 2016 From: pypy.commits at gmail.com (pjenvey) Date: Thu, 21 Apr 2016 20:09:04 -0700 (PDT) Subject: [pypy-commit] pypy py3k: -v/PYTHONVERBOSE & PYTHONCASEOK are now supported thanks to importlib Message-ID: <571995d0.d3161c0a.eb71.49e3@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83811:f42059549ea3 Date: 2016-04-21 20:07 -0700 http://bitbucket.org/pypy/pypy/changeset/f42059549ea3/ Log: -v/PYTHONVERBOSE & PYTHONCASEOK are now supported thanks to importlib diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -2,7 +2,7 @@ # This is pure Python code that handles the main entry point into "pypy". 
# See test/test_app_main. -# Missing vs CPython: -b, -d, -v, -x, -3 +# Missing vs CPython: -b, -d, -x, -3 from __future__ import print_function, unicode_literals USAGE1 = __doc__ = """\ Options and arguments (and corresponding environment variables): @@ -20,6 +20,8 @@ -s : don't add user site directory to sys.path; also PYTHONNOUSERSITE -S : don't imply 'import site' on initialization -u : unbuffered binary stdout and stderr; also PYTHONUNBUFFERED=x +-v : verbose (trace import statements); also PYTHONVERBOSE=x + can be supplied multiple times to increase verbosity -V : print the Python version number and exit (also --version) -W arg : warning control; arg is action:message:category:module:lineno also PYTHONWARNINGS=arg @@ -30,12 +32,13 @@ PyPy options and arguments: --info : print translation information about this PyPy executable """ -# Missing vs CPython: PYTHONHOME, PYTHONCASEOK +# Missing vs CPython: PYTHONHOME USAGE2 = """ Other environment variables: PYTHONSTARTUP: file executed on interactive startup (no default) PYTHONPATH : %r-separated list of directories prefixed to the default module search path. The result is sys.path. +PYTHONCASEOK : ignore case in 'import' statements (Windows). PYTHONIOENCODING: Encoding[:errors] used for stdin/stdout/stderr. PYPY_IRC_TOPIC: if set to a non-empty value, print a random #pypy IRC topic at startup of interactive mode. 
From pypy.commits at gmail.com Fri Apr 22 03:07:19 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 22 Apr 2016 00:07:19 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: forgot to fix this Message-ID: <5719cda7.891d1c0a.39a2b.ffff8284@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83812:5e472096768f Date: 2016-04-21 19:46 +0300 http://bitbucket.org/pypy/pypy/changeset/5e472096768f/ Log: forgot to fix this diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withtypeversion.txt +++ /dev/null @@ -1,6 +0,0 @@ -This (mostly internal) option enables "type versions": Every type object gets an -(only internally visible) version that is updated when the type's dict is -changed. This is e.g. used for invalidating caches. It does not make sense to -enable this option alone. - -.. internal diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -114,8 +114,8 @@ created. This gives the memory and speed behaviour of ``xrange`` and the generality of use of ``range``, and makes ``xrange`` essentially useless. -You can enable this feature with the :config:`objspace.std.withrangelist` -option. +This feature is enabled by default as part of the +:config:`objspace.std.withliststrategies` option. 
User Class Optimizations From pypy.commits at gmail.com Fri Apr 22 03:07:21 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 22 Apr 2016 00:07:21 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: remove the withtypeversion option and turn it on by default Message-ID: <5719cda9.52ad1c0a.c220c.ffff80c0@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83813:8af4c34be749 Date: 2016-04-21 19:49 +0300 http://bitbucket.org/pypy/pypy/changeset/8af4c34be749/ Log: remove the withtypeversion option and turn it on by default diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -222,22 +222,15 @@ "make instances really small but slow without the JIT", default=False, requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withtypeversion", True), ]), BoolOption("withliststrategies", "enable optimized ways to store lists of primitives ", default=True), - BoolOption("withtypeversion", - "version type objects when changing them", - cmdline=None, - default=False), - BoolOption("withmethodcache", "try to cache method lookups", - default=False, - requires=[("objspace.std.withtypeversion", True)]), + default=False), BoolOption("withmethodcachecounter", "try to cache methods and provide a counter in __pypy__. 
" "for testing purposes only.", diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -52,14 +52,6 @@ set_pypy_opt_level(conf, '0') assert not conf.objspace.std.getattributeshortcut -def test_rweakref_required(): - conf = get_pypy_config() - conf.translation.rweakref = False - set_pypy_opt_level(conf, '3') - - assert not conf.objspace.std.withtypeversion - assert not conf.objspace.std.withmethodcache - def test_check_documentation(): def check_file_exists(fn): assert configdocdir.join(fn).check() diff --git a/pypy/objspace/std/test/test_versionedtype.py b/pypy/objspace/std/test/test_versionedtype.py --- a/pypy/objspace/std/test/test_versionedtype.py +++ b/pypy/objspace/std/test/test_versionedtype.py @@ -1,7 +1,6 @@ from pypy.objspace.std.test import test_typeobject class TestVersionedType(test_typeobject.TestTypeObject): - spaceconfig = {"objspace.std.withtypeversion": True} def get_three_classes(self): space = self.space @@ -261,6 +260,3 @@ -class AppTestVersionedType(test_typeobject.AppTestTypeObject): - spaceconfig = {"objspace.std.withtypeversion": True} - diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -38,9 +38,8 @@ def unwrap_cell(space, w_value): - if space.config.objspace.std.withtypeversion: - if isinstance(w_value, MutableCell): - return w_value.unwrap_cell(space) + if isinstance(w_value, MutableCell): + return w_value.unwrap_cell(space) return w_value def write_cell(space, w_cell, w_value): @@ -170,14 +169,13 @@ layout = setup_user_defined_type(w_self, force_new_layout) w_self.layout = layout - if space.config.objspace.std.withtypeversion: - if not is_mro_purely_of_types(w_self.mro_w): - pass - else: - # the _version_tag should change, whenever the content of - # dict_w of any of the types in the mro changes, or if the mro - # 
itself changes - w_self._version_tag = VersionTag() + if not is_mro_purely_of_types(w_self.mro_w): + pass + else: + # the _version_tag should change, whenever the content of + # dict_w of any of the types in the mro changes, or if the mro + # itself changes + w_self._version_tag = VersionTag() if space.config.objspace.std.withmapdict: from pypy.objspace.std.mapdict import DictTerminator, NoDictTerminator if w_self.hasdict: @@ -197,11 +195,6 @@ """ space = w_self.space assert w_self.is_heaptype() or w_self.is_cpytype() - if (not space.config.objspace.std.withtypeversion and - not space.config.objspace.std.getattributeshortcut and - not space.config.objspace.std.withidentitydict and - not space.config.objspace.std.newshortcut): - return if space.config.objspace.std.getattributeshortcut: w_self.uses_object_getattribute = False @@ -215,8 +208,7 @@ if space.config.objspace.std.newshortcut: w_self.w_new_function = None - if (space.config.objspace.std.withtypeversion - and w_self._version_tag is not None): + if w_self._version_tag is not None: w_self._version_tag = VersionTag() subclasses_w = w_self.get_subclasses() @@ -296,13 +288,12 @@ return compute_C3_mro(w_self.space, w_self) def getdictvalue(w_self, space, attr): - if space.config.objspace.std.withtypeversion: - version_tag = w_self.version_tag() - if version_tag is not None: - return unwrap_cell( - space, - w_self._pure_getdictvalue_no_unwrapping( - space, version_tag, attr)) + version_tag = w_self.version_tag() + if version_tag is not None: + return unwrap_cell( + space, + w_self._pure_getdictvalue_no_unwrapping( + space, version_tag, attr)) w_value = w_self._getdictvalue_no_unwrapping(space, attr) return unwrap_cell(space, w_value) @@ -333,14 +324,13 @@ msg = ("a __del__ method added to an existing type will not be " "called") space.warn(space.wrap(msg), space.w_RuntimeWarning) - if space.config.objspace.std.withtypeversion: - version_tag = w_self.version_tag() - if version_tag is not None: - w_curr = 
w_self._pure_getdictvalue_no_unwrapping( - space, version_tag, name) - w_value = write_cell(space, w_curr, w_value) - if w_value is None: - return True + version_tag = w_self.version_tag() + if version_tag is not None: + w_curr = w_self._pure_getdictvalue_no_unwrapping( + space, version_tag, name) + w_value = write_cell(space, w_curr, w_value) + if w_value is None: + return True w_self.mutated(name) w_self.dict_w[name] = w_value return True @@ -429,8 +419,7 @@ return tup tup_w = w_self._pure_lookup_where_with_method_cache(name, version_tag) w_class, w_value = tup_w - if (space.config.objspace.std.withtypeversion and - isinstance(w_value, MutableCell)): + if isinstance(w_value, MutableCell): return w_class, w_value.unwrap_cell(space) return tup_w # don't make a new tuple, reuse the old one @@ -524,7 +513,7 @@ def issubtype(w_self, w_type): promote(w_self) promote(w_type) - if w_self.space.config.objspace.std.withtypeversion and we_are_jitted(): + if we_are_jitted(): version_tag1 = w_self.version_tag() version_tag2 = w_type.version_tag() if version_tag1 is not None and version_tag2 is not None: @@ -843,8 +832,7 @@ cls.mro_w = old_mro w_type.bases_w = saved_bases_w raise - if (space.config.objspace.std.withtypeversion and - w_type.version_tag() is not None and + if (w_type.version_tag() is not None and not is_mro_purely_of_types(w_type.mro_w)): # Disable method cache if the hierarchy isn't pure. 
w_type._version_tag = None From pypy.commits at gmail.com Fri Apr 22 03:07:23 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 22 Apr 2016 00:07:23 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: remove withmethodcache option and have it always be on Message-ID: <5719cdab.89691c0a.5114b.ffff81f9@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83814:c0ee63ae450b Date: 2016-04-22 09:46 +0300 http://bitbucket.org/pypy/pypy/changeset/c0ee63ae450b/ Log: remove withmethodcache option and have it always be on diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -228,14 +228,10 @@ "enable optimized ways to store lists of primitives ", default=True), - BoolOption("withmethodcache", - "try to cache method lookups", - default=False), BoolOption("withmethodcachecounter", "try to cache methods and provide a counter in __pypy__. " "for testing purposes only.", - default=False, - requires=[("objspace.std.withmethodcache", True)]), + default=False), IntOption("methodcachesizeexp", " 2 ** methodcachesizeexp is the size of the of the method cache ", default=11), @@ -273,7 +269,6 @@ """ # all the good optimizations for PyPy should be listed here if level in ['2', '3', 'jit']: - config.objspace.std.suggest(withmethodcache=True) config.objspace.std.suggest(intshortcut=True) config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt --- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt +++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt @@ -1,1 +1,1 @@ -Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`. +Set the cache size (number of entries) for the method cache. 
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt --- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt +++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt @@ -1,1 +1,1 @@ -Testing/debug option for :config:`objspace.std.withmethodcache`. +Testing/debug option for the method cache. diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -133,8 +133,7 @@ base classes is changed). On subsequent lookups the cached version can be used, as long as the instance did not shadow any of its classes attributes. -You can enable this feature with the :config:`objspace.std.withmethodcache` -option. +This feature is enabled by default. Interpreter Optimizations diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -67,12 +67,7 @@ @jit.elidable def find_map_attr(self, name, index): - if (self.space.config.objspace.std.withmethodcache): - return self._find_map_attr_cache(name, index) - return self._find_map_attr(name, index) - - @jit.dont_look_inside - def _find_map_attr_cache(self, name, index): + # attr cache space = self.space cache = space.fromcache(MapAttrCache) SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp @@ -429,7 +424,6 @@ class MapAttrCache(object): def __init__(self, space): - assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.attrs = [None] * SIZE self.names = [None] * SIZE diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1115,7 +1115,6 @@ class std: withsmalldicts = False withcelldict = False - withmethodcache = 
False withidentitydict = False withmapdict = False diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -6,7 +6,6 @@ class std: withsmalldicts = False withcelldict = False - withmethodcache = False withidentitydict = False withmapdict = True diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -68,7 +68,6 @@ class MethodCache(object): def __init__(self, space): - assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.versions = [None] * SIZE self.names = [None] * SIZE @@ -352,17 +351,11 @@ def lookup(w_self, name): # note that this doesn't call __get__ on the result at all space = w_self.space - if space.config.objspace.std.withmethodcache: - return w_self.lookup_where_with_method_cache(name)[1] - - return w_self._lookup(name) + return w_self.lookup_where_with_method_cache(name)[1] def lookup_where(w_self, name): space = w_self.space - if space.config.objspace.std.withmethodcache: - return w_self.lookup_where_with_method_cache(name) - - return w_self._lookup_where(name) + return w_self.lookup_where_with_method_cache(name) @unroll_safe def lookup_starting_at(w_self, w_starttype, name): @@ -412,7 +405,6 @@ def lookup_where_with_method_cache(w_self, name): space = w_self.space promote(w_self) - assert space.config.objspace.std.withmethodcache version_tag = promote(w_self.version_tag()) if version_tag is None: tup = w_self._lookup_where(name) @@ -424,10 +416,7 @@ return tup_w # don't make a new tuple, reuse the old one def _pure_lookup_where_possibly_with_method_cache(w_self, name, version_tag): - if w_self.space.config.objspace.std.withmethodcache: - return w_self._pure_lookup_where_with_method_cache(name, version_tag) - else: - return w_self._lookup_where_all_typeobjects(name) + 
return w_self._pure_lookup_where_with_method_cache(name, version_tag) @elidable def _pure_lookup_where_with_method_cache(w_self, name, version_tag): From pypy.commits at gmail.com Fri Apr 22 03:07:25 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 22 Apr 2016 00:07:25 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: remove option docs Message-ID: <5719cdad.92371c0a.f3d9a.ffff8141@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83815:6bf9184b57f4 Date: 2016-04-22 09:48 +0300 http://bitbucket.org/pypy/pypy/changeset/6bf9184b57f4/ Log: remove option docs diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmethodcache.txt +++ /dev/null @@ -1,2 +0,0 @@ -Enable method caching. See the section "Method Caching" in `Standard -Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__. From pypy.commits at gmail.com Fri Apr 22 03:07:26 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 22 Apr 2016 00:07:26 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: again, weakrefs no longer needed Message-ID: <5719cdae.508e1c0a.3593.7f0a@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83816:f71d1b5b202c Date: 2016-04-22 09:50 +0300 http://bitbucket.org/pypy/pypy/changeset/f71d1b5b202c/ Log: again, weakrefs no longer needed diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -251,9 +251,7 @@ BoolOption("withidentitydict", "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), + default=False), ]), ]) From pypy.commits at gmail.com Fri Apr 22 03:07:28 2016 From: pypy.commits 
at gmail.com (cfbolz) Date: Fri, 22 Apr 2016 00:07:28 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: remove withidentitydict option and make that always on Message-ID: <5719cdb0.22c8c20a.9fab7.258b@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83817:ef01c84d0a87 Date: 2016-04-22 10:06 +0300 http://bitbucket.org/pypy/pypy/changeset/ef01c84d0a87/ Log: remove withidentitydict option and make that always on diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -249,9 +249,6 @@ "cache and shortcut calling __new__ from builtin types", default=False), - BoolOption("withidentitydict", - "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not", - default=False), ]), ]) @@ -272,7 +269,6 @@ config.objspace.std.suggest(getattributeshortcut=True) #config.objspace.std.suggest(newshortcut=True) config.objspace.std.suggest(withspecialisedtuple=True) - config.objspace.std.suggest(withidentitydict=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withidentitydict.txt +++ /dev/null @@ -1,21 +0,0 @@ -============================= -objspace.std.withidentitydict -============================= - -* **name:** withidentitydict - -* **description:** enable a dictionary strategy for "by identity" comparisons - -* **command-line:** --objspace-std-withidentitydict - -* **command-line for negation:** --no-objspace-std-withidentitydict - -* **option type:** boolean option - -* **default:** True - - -Enable a dictionary strategy specialized for instances of classes which -compares "by identity", which is the default unless you override ``__hash__``, -``__eq__`` or ``__cmp__``. 
This strategy will be used only with new-style -classes. diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -62,29 +62,37 @@ Dictionary Optimizations ~~~~~~~~~~~~~~~~~~~~~~~~ -Multi-Dicts -+++++++++++ +Dict Strategies +++++++++++++++++ -Multi-dicts are a special implementation of dictionaries. It became clear that -it is very useful to *change* the internal representation of an object during -its lifetime. Multi-dicts are a general way to do that for dictionaries: they -provide generic support for the switching of internal representations for -dicts. +Dict strategies are an implementation approach for dictionaries (and lists) +that make it possible to use a specialized representation of the dictionary's +data, while still being able to switch back to a general representation should +that become necessary later. -If you just enable multi-dicts, special representations for empty dictionaries, -for string-keyed dictionaries. In addition there are more specialized dictionary -implementations for various purposes (see below). +Dict strategies are always enabled, by default there are special strategies for +dicts with just string keys, just unicode keys and just integer keys. If one of +those specialized strategies is used, then dict lookup can use much faster +hashing and comparison for the dict keys. There is of course also a strategy +for general keys. -This is now the default implementation of dictionaries in the Python interpreter. +Identity Dicts ++++++++++++++++ -Sharing Dicts +We also have a strategy specialized for keys that are instances of classes +which compares "by identity", which is the default unless you override +``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with +new-style classes. + + +Map Dicts +++++++++++++ -Sharing dictionaries are a special representation used together with multidicts. 
-This dict representation is used only for instance dictionaries and tries to -make instance dictionaries use less memory (in fact, in the ideal case the -memory behaviour should be mostly like that of using __slots__). +Map dictionaries are a special representation used together with dict strategies. +This dict strategy is used only for instance dictionaries and tries to +make instance dictionaries use less memory (in fact, usually memory behaviour +should be mostly like that of using ``__slots__``). The idea is the following: Most instances of the same class have very similar attributes, and are even adding these keys to the dictionary in the same order @@ -95,8 +103,6 @@ dicts: the representation of the instance dict contains only a list of values. -A more advanced version of sharing dicts, called *map dicts,* is available -with the :config:`objspace.std.withmapdict` option. List Optimizations diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -592,7 +592,6 @@ return self.erase(None) def switch_to_correct_strategy(self, w_dict, w_key): - withidentitydict = self.space.config.objspace.std.withidentitydict if type(w_key) is self.space.StringObjectCls: self.switch_to_bytes_strategy(w_dict) return @@ -602,7 +601,7 @@ w_type = self.space.type(w_key) if self.space.is_w(w_type, self.space.w_int): self.switch_to_int_strategy(w_dict) - elif withidentitydict and w_type.compares_by_identity(): + elif w_type.compares_by_identity(): self.switch_to_identity_strategy(w_dict) else: self.switch_to_object_strategy(w_dict) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1115,7 +1115,6 @@ class std: withsmalldicts = False withcelldict = False - withidentitydict = False withmapdict = False 
FakeSpace.config = Config() diff --git a/pypy/objspace/std/test/test_identitydict.py b/pypy/objspace/std/test/test_identitydict.py --- a/pypy/objspace/std/test/test_identitydict.py +++ b/pypy/objspace/std/test/test_identitydict.py @@ -2,7 +2,6 @@ from pypy.interpreter.gateway import interp2app class AppTestComparesByIdentity: - spaceconfig = {"objspace.std.withidentitydict": True} def setup_class(cls): from pypy.objspace.std import identitydict @@ -56,7 +55,6 @@ class AppTestIdentityDict(object): - spaceconfig = {"objspace.std.withidentitydict": True} def setup_class(cls): if cls.runappdirect: diff --git a/pypy/objspace/std/test/test_identityset.py b/pypy/objspace/std/test/test_identityset.py --- a/pypy/objspace/std/test/test_identityset.py +++ b/pypy/objspace/std/test/test_identityset.py @@ -3,9 +3,6 @@ class AppTestIdentitySet(object): - # needed for compares_by_identity - spaceconfig = {"objspace.std.withidentitydict": True} - def setup_class(cls): from pypy.objspace.std import identitydict if cls.runappdirect: diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -124,6 +124,7 @@ "flag_cpytype", "flag_abstract?", "flag_sequence_bug_compat", + "compares_by_identity_status?", 'needsdel', 'weakrefable', 'hasdict', @@ -138,7 +139,7 @@ # (False is a conservative default, fixed during real usage) uses_object_getattribute = False - # for config.objspace.std.withidentitydict + # for the IdentityDictStrategy compares_by_identity_status = UNKNOWN # used to cache the type's __new__ function @@ -199,10 +200,9 @@ w_self.uses_object_getattribute = False # ^^^ conservative default, fixed during real usage - if space.config.objspace.std.withidentitydict: - if (key is None or key == '__eq__' or - key == '__cmp__' or key == '__hash__'): - w_self.compares_by_identity_status = UNKNOWN + if (key is None or key == '__eq__' or + key == '__cmp__' or key == '__hash__'): + 
w_self.compares_by_identity_status = UNKNOWN if space.config.objspace.std.newshortcut: w_self.w_new_function = None @@ -253,8 +253,6 @@ def compares_by_identity(w_self): from pypy.objspace.descroperation import object_hash, type_eq - if not w_self.space.config.objspace.std.withidentitydict: - return False # conservative # if w_self.compares_by_identity_status != UNKNOWN: # fast path From pypy.commits at gmail.com Fri Apr 22 03:30:24 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 22 Apr 2016 00:30:24 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: remove some vestiges of withsmalldicts(!) in mocking code Message-ID: <5719d310.442cc20a.75921.2c2b@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83818:33f63564d8e3 Date: 2016-04-22 10:07 +0300 http://bitbucket.org/pypy/pypy/changeset/33f63564d8e3/ Log: remove some vestiges of withsmalldicts(!) in mocking code diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1113,7 +1113,6 @@ class Config: class objspace: class std: - withsmalldicts = False withcelldict = False withmapdict = False diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -4,9 +4,7 @@ class Config: class objspace: class std: - withsmalldicts = False withcelldict = False - withidentitydict = False withmapdict = True space = FakeSpace() From pypy.commits at gmail.com Fri Apr 22 03:30:26 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 22 Apr 2016 00:30:26 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: remove getattributeshortcut option and turn it on by default Message-ID: <5719d312.aa5ec20a.16ccd.2fcf@mx.google.com> Author: Carl Friedrich Bolz Branch: 
remove-objspace-options Changeset: r83819:75a6a2b7379e Date: 2016-04-22 10:21 +0300 http://bitbucket.org/pypy/pypy/changeset/75a6a2b7379e/ Log: remove getattributeshortcut option and turn it on by default diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -220,9 +220,7 @@ BoolOption("withmapdict", "make instances really small but slow without the JIT", - default=False, - requires=[("objspace.std.getattributeshortcut", True), - ]), + default=False), BoolOption("withliststrategies", "enable optimized ways to store lists of primitives ", @@ -242,9 +240,6 @@ BoolOption("optimized_list_getitem", "special case the 'list[integer]' expressions", default=False), - BoolOption("getattributeshortcut", - "track types that override __getattribute__", - default=False), BoolOption("newshortcut", "cache and shortcut calling __new__ from builtin types", default=False), @@ -266,7 +261,6 @@ if level in ['2', '3', 'jit']: config.objspace.std.suggest(intshortcut=True) config.objspace.std.suggest(optimized_list_getitem=True) - config.objspace.std.suggest(getattributeshortcut=True) #config.objspace.std.suggest(newshortcut=True) config.objspace.std.suggest(withspecialisedtuple=True) #if not IS_64_BITS: diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.getattributeshortcut.txt +++ /dev/null @@ -1,1 +0,0 @@ -Performance only: track types that override __getattribute__. diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -7,14 +7,13 @@ def collect(space, generation=0): "Run a full collection. The optional argument is ignored." # First clear the method cache. See test_gc for an example of why. 
- if space.config.objspace.std.withmethodcache: - from pypy.objspace.std.typeobject import MethodCache - cache = space.fromcache(MethodCache) + from pypy.objspace.std.typeobject import MethodCache + cache = space.fromcache(MethodCache) + cache.clear() + if space.config.objspace.std.withmapdict: + from pypy.objspace.std.mapdict import MapAttrCache + cache = space.fromcache(MapAttrCache) cache.clear() - if space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import MapAttrCache - cache = space.fromcache(MapAttrCache) - cache.clear() rgc.collect() return space.wrap(0) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -517,7 +517,6 @@ return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2]) _DescrOperation_is_true = is_true - _DescrOperation_getattr = getattr def is_true(self, w_obj): # a shortcut for performance @@ -526,8 +525,6 @@ return self._DescrOperation_is_true(w_obj) def getattr(self, w_obj, w_name): - if not self.config.objspace.std.getattributeshortcut: - return self._DescrOperation_getattr(w_obj, w_name) # an optional shortcut for performance w_type = self.type(w_obj) diff --git a/pypy/objspace/std/test/test_callmethod.py b/pypy/objspace/std/test/test_callmethod.py --- a/pypy/objspace/std/test/test_callmethod.py +++ b/pypy/objspace/std/test/test_callmethod.py @@ -97,21 +97,17 @@ else: raise Exception("did not raise?") """ - + def test_kwargs(self): exec """if 1: class C(object): def f(self, a): return a + 2 - + assert C().f(a=3) == 5 """ -class AppTestCallMethodWithGetattributeShortcut(AppTestCallMethod): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - class TestCallMethod: def test_space_call_method(self): space = self.space diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ 
b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1115,6 +1115,8 @@ class std: withcelldict = False withmapdict = False + methodcachesizeexp = 11 + withmethodcachecounter = False FakeSpace.config = Config() diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -6,6 +6,8 @@ class std: withcelldict = False withmapdict = True + methodcachesizeexp = 11 + withmethodcachecounter = False space = FakeSpace() space.config = Config diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -1105,7 +1105,6 @@ class AppTestGetattributeShortcut: - spaceconfig = {"objspace.std.getattributeshortcut": True} def test_reset_logic(self): class X(object): diff --git a/pypy/objspace/std/test/test_userobject.py b/pypy/objspace/std/test/test_userobject.py --- a/pypy/objspace/std/test/test_userobject.py +++ b/pypy/objspace/std/test/test_userobject.py @@ -273,13 +273,3 @@ i += 1 -class AppTestWithGetAttributeShortcut(AppTestUserObject): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - -class AppTestDescriptorWithGetAttributeShortcut( - test_descriptor.AppTest_Descriptor): - # for the individual tests see - # ====> ../../test/test_descriptor.py - - spaceconfig = {"objspace.std.getattributeshortcut": True} diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -135,7 +135,7 @@ 'mro_w?[*]', ] - # for config.objspace.std.getattributeshortcut + # wether the class has an overridden __getattribute__ # (False is a conservative default, fixed during real usage) uses_object_getattribute = False @@ -196,9 +196,8 @@ space = w_self.space assert w_self.is_heaptype() or w_self.is_cpytype() - if 
space.config.objspace.std.getattributeshortcut: - w_self.uses_object_getattribute = False - # ^^^ conservative default, fixed during real usage + w_self.uses_object_getattribute = False + # ^^^ conservative default, fixed during real usage if (key is None or key == '__eq__' or key == '__cmp__' or key == '__hash__'): @@ -230,15 +229,13 @@ the one from object, in which case it returns None """ from pypy.objspace.descroperation import object_getattribute if not we_are_jitted(): - shortcut = w_self.space.config.objspace.std.getattributeshortcut - if not shortcut or not w_self.uses_object_getattribute: + if not w_self.uses_object_getattribute: # slow path: look for a custom __getattribute__ on the class w_descr = w_self.lookup('__getattribute__') # if it was not actually overriden in the class, we remember this # fact for the next time. if w_descr is object_getattribute(w_self.space): - if shortcut: - w_self.uses_object_getattribute = True + w_self.uses_object_getattribute = True else: return w_descr return None From pypy.commits at gmail.com Fri Apr 22 03:30:28 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 22 Apr 2016 00:30:28 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: move the tests for now on-by-default identity tracking to test_typeobject Message-ID: <5719d314.82bb1c0a.6f114.ffff89cf@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83820:cabe08a0a60d Date: 2016-04-22 10:24 +0300 http://bitbucket.org/pypy/pypy/changeset/cabe08a0a60d/ Log: move the tests for now on-by-default identity tracking to test_typeobject diff --git a/pypy/objspace/std/test/test_identitydict.py b/pypy/objspace/std/test/test_identitydict.py --- a/pypy/objspace/std/test/test_identitydict.py +++ b/pypy/objspace/std/test/test_identitydict.py @@ -1,59 +1,6 @@ import py from pypy.interpreter.gateway import interp2app -class AppTestComparesByIdentity: - - def setup_class(cls): - from pypy.objspace.std import identitydict - if 
cls.runappdirect: - py.test.skip("interp2app doesn't work on appdirect") - - def compares_by_identity(space, w_cls): - return space.wrap(w_cls.compares_by_identity()) - cls.w_compares_by_identity = cls.space.wrap(interp2app(compares_by_identity)) - - def test_compares_by_identity(self): - class Plain(object): - pass - - class CustomEq(object): - def __eq__(self, other): - return True - - class CustomCmp (object): - def __cmp__(self, other): - return 0 - - class CustomHash(object): - def __hash__(self): - return 0 - - class TypeSubclass(type): - pass - - class TypeSubclassCustomCmp(type): - def __cmp__(self, other): - return 0 - - assert self.compares_by_identity(Plain) - assert not self.compares_by_identity(CustomEq) - assert not self.compares_by_identity(CustomCmp) - assert not self.compares_by_identity(CustomHash) - assert self.compares_by_identity(type) - assert self.compares_by_identity(TypeSubclass) - assert not self.compares_by_identity(TypeSubclassCustomCmp) - - def test_modify_class(self): - class X(object): - pass - - assert self.compares_by_identity(X) - X.__eq__ = lambda x: None - assert not self.compares_by_identity(X) - del X.__eq__ - assert self.compares_by_identity(X) - - class AppTestIdentityDict(object): def setup_class(cls): diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -1238,3 +1238,57 @@ class Y: __metaclass__ = X assert (Y < Y) is True + + +class AppTestComparesByIdentity: + + def setup_class(cls): + if cls.runappdirect: + py.test.skip("interp2app doesn't work on appdirect") + + def compares_by_identity(space, w_cls): + return space.wrap(w_cls.compares_by_identity()) + cls.w_compares_by_identity = cls.space.wrap(interp2app(compares_by_identity)) + + def test_compares_by_identity(self): + class Plain(object): + pass + + class CustomEq(object): + def __eq__(self, other): + return True + + class 
CustomCmp (object): + def __cmp__(self, other): + return 0 + + class CustomHash(object): + def __hash__(self): + return 0 + + class TypeSubclass(type): + pass + + class TypeSubclassCustomCmp(type): + def __cmp__(self, other): + return 0 + + assert self.compares_by_identity(Plain) + assert not self.compares_by_identity(CustomEq) + assert not self.compares_by_identity(CustomCmp) + assert not self.compares_by_identity(CustomHash) + assert self.compares_by_identity(type) + assert self.compares_by_identity(TypeSubclass) + assert not self.compares_by_identity(TypeSubclassCustomCmp) + + def test_modify_class(self): + class X(object): + pass + + assert self.compares_by_identity(X) + X.__eq__ = lambda x: None + assert not self.compares_by_identity(X) + del X.__eq__ + assert self.compares_by_identity(X) + + From pypy.commits at gmail.com Fri Apr 22 03:54:30 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 22 Apr 2016 00:54:30 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: add tests for bytearray, pass on cpython with -A Message-ID: <5719d8b6.0f801c0a.51e59.ffff933f@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83821:8ad9620faa9e Date: 2016-04-22 10:53 +0300 http://bitbucket.org/pypy/pypy/changeset/8ad9620faa9e/ Log: add tests for bytearray, pass on cpython with -A diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1287,7 +1287,7 @@ prefix=prefix) code = "#include \n" if use_micronumpy: - code += "#include /* api.py line 1290 */" + code += "#include /* api.py line 1290 */\n" code += "\n".join(functions) eci = build_eci(False, export_symbols, code, use_micronumpy) diff --git a/pypy/module/cpyext/test/test_bytearrayobject.py b/pypy/module/cpyext/test/test_bytearrayobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_bytearrayobject.py @@ -0,0 +1,170 @@ +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + +class 
AppTestStringObject(AppTestCpythonExtensionBase): + def test_basic(self): + module = self.import_extension('foo', [ + ("get_hello1", "METH_NOARGS", + """ + return PyByteArray_FromStringAndSize( + "Hello world", 11); + """), + ("get_hello2", "METH_NOARGS", + """ + return PyByteArray_FromStringAndSize("Hello world", 12); + """), + ("test_Size", "METH_NOARGS", + """ + PyObject* s = PyByteArray_FromStringAndSize("Hello world", 12); + int result = 0; + size_t expected_size; + + if(PyByteArray_Size(s) == 12) { + result = 1; + } + #ifdef PYPY_VERSION + expected_size = sizeof(void*)*7; + #elif defined Py_DEBUG + expected_size = 64; + #else + expected_size = 48; + #endif + if(s->ob_type->tp_basicsize != expected_size) + { + printf("tp_basicsize==%ld\\n", s->ob_type->tp_basicsize); + result = 0; + } + Py_DECREF(s); + return PyBool_FromLong(result); + """), + ("test_is_bytearray", "METH_VARARGS", + """ + return PyBool_FromLong(PyByteArray_Check(PyTuple_GetItem(args, 0))); + """)], prologue='#include ') + assert module.get_hello1() == 'Hello world' + assert module.get_hello2() == 'Hello world\x00' + assert module.test_Size() + assert module.test_is_bytearray(bytearray("")) + assert not module.test_is_bytearray(()) + + def test_bytearray_buffer_init(self): + module = self.import_extension('foo', [ + ("getbytearray", "METH_NOARGS", + """ + PyObject *s, *t; + char* c; + Py_ssize_t len; + + s = PyByteArray_FromStringAndSize(NULL, 4); + if (s == NULL) + return NULL; + t = PyByteArray_FromStringAndSize(NULL, 3); + if (t == NULL) + return NULL; + Py_DECREF(t); + c = PyByteArray_AsString(s); + c[0] = 'a'; + c[1] = 'b'; + c[2] = 0; + c[3] = 'c'; + return s; + """), + ]) + s = module.getbytearray() + assert len(s) == 4 + assert s == 'ab\x00c' + + def test_bytearray_mutable(self): + module = self.import_extension('foo', [ + ("mutable", "METH_NOARGS", + """ + PyObject *base; + PyByteArrayObject *obj; + char * p_str; + base = PyByteArray_FromStringAndSize("test", 10); + if 
(PyByteArray_GET_SIZE(base) != 10) + return PyLong_FromLong(-PyByteArray_GET_SIZE(base)); + obj = (PyByteArrayObject*)base; + memcpy(PyByteArray_AS_STRING(obj), "works", 6); + Py_INCREF(base); + return base; + """), + ]) + s = module.mutable() + if s == '\x00' * 10: + assert False, "no RW access to bytearray" + assert s[:6] == 'works\x00' + + def test_AsByteArray(self): + module = self.import_extension('foo', [ + ("getbytearray", "METH_NOARGS", + """ + PyObject* s1 = PyByteArray_FromStringAndSize("test", 4); + char* c = PyByteArray_AsString(s1); + PyObject* s2 = PyByteArray_FromStringAndSize(c, 4); + Py_DECREF(s1); + return s2; + """), + ]) + s = module.getbytearray() + assert s == 'test' + + def test_manipulations(self): + module = self.import_extension('foo', [ + ("bytearray_from_string", "METH_VARARGS", + ''' + return PyByteArray_FromStringAndSize(PyString_AsString( + PyTuple_GetItem(args, 0)), 4); + ''' + ), + ("concat", "METH_VARARGS", + """ + PyObject * ret, *right, *left; + PyObject *ba1, *ba2; + if (!PyArg_ParseTuple(args, "OO", &left, &right)) { + return PyString_FromString("parse failed"); + } + ba1 = PyByteArray_FromObject(left); + ba2 = PyByteArray_FromObject(right); + if (ba1 == NULL || ba2 == NULL) + { + /* exception should be set */ + return NULL; + } + ret = PyByteArray_Concat(ba1, ba2); + return ret; + """)]) + assert module.bytearray_from_string("huheduwe") == "huhe" + ret = module.concat('abc', 'def') + assert ret == 'abcdef' + assert not isinstance(ret, str) + assert isinstance(ret, bytearray) + raises(TypeError, module.concat, 'abc', u'def') + + def test_bytearray_resize(self): + module = self.import_extension('foo', [ + ("bytearray_resize", "METH_VARARGS", + ''' + PyObject *obj, *ba; + int newsize, oldsize, ret; + if (!PyArg_ParseTuple(args, "Oi", &obj, &newsize)) { + return PyString_FromString("parse failed"); + } + + ba = PyByteArray_FromObject(obj); + oldsize = PyByteArray_Size(ba); + if (oldsize == 0) + { + return 
PyString_FromString("oldsize is 0"); + } + ret = PyByteArray_Resize(ba, newsize); + if (ret != 0) + { + printf("ret, oldsize, newsize= %d, %d, %d\\n", ret, oldsize, newsize); + return PyString_FromString("ret != 0"); + } + return ba; + ''' + )]) + ret = module.bytearray_resize('abc', 6) + assert len(ret) == 6,"%s, len=%d" % (ret, len(ret)) + From pypy.commits at gmail.com Fri Apr 22 06:34:42 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 22 Apr 2016 03:34:42 -0700 (PDT) Subject: [pypy-commit] pypy default: Merged in techtonik/pypy-1/techtonik/introductionrst-simplify-explanation-abo-1460879168046 (pull request #429) Message-ID: <5719fe42.89691c0a.5114b.ffffd2f8@mx.google.com> Author: Armin Rigo Branch: Changeset: r83825:63c7bb7b783d Date: 2016-04-22 12:33 +0200 http://bitbucket.org/pypy/pypy/changeset/63c7bb7b783d/ Log: Merged in techtonik/pypy-1/techtonik/introductionrst-simplify- explanation-abo-1460879168046 (pull request #429) introduction.rst simplify explanation about PyPy diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst --- a/pypy/doc/introduction.rst +++ b/pypy/doc/introduction.rst @@ -1,16 +1,22 @@ What is PyPy? ============= -In common parlance, PyPy has been used to mean two things. The first is the -:ref:`RPython translation toolchain `, which is a framework for generating -dynamic programming language implementations. And the second is one -particular implementation that is so generated -- -an implementation of the Python_ programming language written in -Python itself. It is designed to be flexible and easy to experiment with. +Historically, PyPy has been used to mean two things. The first is the +:ref:`RPython translation toolchain ` for generating +interpreters for dynamic programming languages. And the second is one +particular implementation of Python_ produced with it. Because RPython +uses the same syntax as Python, this generated version became known as +Python interpreter written in Python. 
It is designed to be flexible and +easy to experiment with. -This double usage has proven to be confusing, and we are trying to move -away from using the word PyPy to mean both things. From now on we will -try to use PyPy to only mean the Python implementation, and say the +To make it more clear, we start with source code written in RPython, +apply the RPython translation toolchain, and end up with PyPy as a +binary executable. This executable is the Python interpreter. + +Double usage has proven to be confusing, so we've moved away from using +the word PyPy to mean both toolchain and generated interpreter. Now we +use word PyPy to refer to the Python implementation, and explicitly +mention :ref:`RPython translation toolchain ` when we mean the framework. Some older documents, presentations, papers and videos will still have the old From pypy.commits at gmail.com Fri Apr 22 06:35:07 2016 From: pypy.commits at gmail.com (techtonik) Date: Fri, 22 Apr 2016 03:35:07 -0700 (PDT) Subject: [pypy-commit] pypy techtonik/introductionrst-simplify-explanation-abo-1460879168046: introduction.rst simplify explanation about PyPy Message-ID: <5719fe5b.4412c30a.595e.776d@mx.google.com> Author: anatoly techtonik Branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046 Changeset: r83822:4ee4bced5787 Date: 2016-04-17 07:55 +0000 http://bitbucket.org/pypy/pypy/changeset/4ee4bced5787/ Log: introduction.rst simplify explanation about PyPy diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst --- a/pypy/doc/introduction.rst +++ b/pypy/doc/introduction.rst @@ -1,16 +1,17 @@ What is PyPy? ============= -In common parlance, PyPy has been used to mean two things. The first is the -:ref:`RPython translation toolchain `, which is a framework for generating -dynamic programming language implementations. And the second is one -particular implementation that is so generated -- -an implementation of the Python_ programming language written in -Python itself. 
It is designed to be flexible and easy to experiment with. +Historically, PyPy has been used to mean two things. The first is the +:ref:`RPython translation toolchain ` for generating +interpreters for dynamic programming languages. And the second is one +particular implementation generated with it -- an implementation of the +Python_. Because RPython uses the same syntax as Python, this generated +version became known as Python interpreter written in Python. This was +designed to be flexible and easy to experiment with. -This double usage has proven to be confusing, and we are trying to move -away from using the word PyPy to mean both things. From now on we will -try to use PyPy to only mean the Python implementation, and say the +Double usage has proven to be confusing, so we've moved away from using +the word PyPy to mean both things. Now we use word PyPy to refer to +the Python implementation, and explicitly mention :ref:`RPython translation toolchain ` when we mean the framework. Some older documents, presentations, papers and videos will still have the old From pypy.commits at gmail.com Fri Apr 22 06:35:09 2016 From: pypy.commits at gmail.com (techtonik) Date: Fri, 22 Apr 2016 03:35:09 -0700 (PDT) Subject: [pypy-commit] pypy techtonik/introductionrst-simplify-explanation-abo-1460879168046: introduction.rst attempt to explain it a little better Message-ID: <5719fe5d.43ecc20a.5713.79e7@mx.google.com> Author: anatoly techtonik Branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046 Changeset: r83823:598a7426415e Date: 2016-04-22 09:56 +0000 http://bitbucket.org/pypy/pypy/changeset/598a7426415e/ Log: introduction.rst attempt to explain it a little better diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst --- a/pypy/doc/introduction.rst +++ b/pypy/doc/introduction.rst @@ -4,10 +4,10 @@ Historically, PyPy has been used to mean two things. 
The first is the :ref:`RPython translation toolchain ` for generating interpreters for dynamic programming languages. And the second is one -particular implementation generated with it -- an implementation of the -Python_. Because RPython uses the same syntax as Python, this generated -version became known as Python interpreter written in Python. This was -designed to be flexible and easy to experiment with. +particular implementation of Python_ generated with it. Because RPython +uses the same syntax as Python, this generated version became known as +Python interpreter written in Python. It is designed to be flexible and +easy to experiment with. Double usage has proven to be confusing, so we've moved away from using the word PyPy to mean both things. Now we use word PyPy to refer to From pypy.commits at gmail.com Fri Apr 22 06:35:10 2016 From: pypy.commits at gmail.com (techtonik) Date: Fri, 22 Apr 2016 03:35:10 -0700 (PDT) Subject: [pypy-commit] pypy techtonik/introductionrst-simplify-explanation-abo-1460879168046: introduction.rst add comments be Armin Rigo Message-ID: <5719fe5e.a9a1c20a.da1a.7574@mx.google.com> Author: anatoly techtonik Branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046 Changeset: r83824:c54661e15b83 Date: 2016-04-22 10:10 +0000 http://bitbucket.org/pypy/pypy/changeset/c54661e15b83/ Log: introduction.rst add comments be Armin Rigo diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst --- a/pypy/doc/introduction.rst +++ b/pypy/doc/introduction.rst @@ -4,14 +4,19 @@ Historically, PyPy has been used to mean two things. The first is the :ref:`RPython translation toolchain ` for generating interpreters for dynamic programming languages. And the second is one -particular implementation of Python_ generated with it. Because RPython +particular implementation of Python_ produced with it. Because RPython uses the same syntax as Python, this generated version became known as Python interpreter written in Python. 
It is designed to be flexible and easy to experiment with. +To make it more clear, we start with source code written in RPython, +apply the RPython translation toolchain, and end up with PyPy as a +binary executable. This executable is the Python interpreter. + Double usage has proven to be confusing, so we've moved away from using -the word PyPy to mean both things. Now we use word PyPy to refer to -the Python implementation, and explicitly mention +the word PyPy to mean both toolchain and generated interpreter. Now we +use word PyPy to refer to the Python implementation, and explicitly +mention :ref:`RPython translation toolchain ` when we mean the framework. Some older documents, presentations, papers and videos will still have the old From pypy.commits at gmail.com Fri Apr 22 08:01:02 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 22 Apr 2016 05:01:02 -0700 (PDT) Subject: [pypy-commit] pypy default: silence test_whatsnew Message-ID: <571a127e.55301c0a.346bc.3938@mx.google.com> Author: Armin Rigo Branch: Changeset: r83826:dcde037ada8c Date: 2016-04-22 14:00 +0200 http://bitbucket.org/pypy/pypy/changeset/dcde037ada8c/ Log: silence test_whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,3 +5,4 @@ .. this is a revision shortly after release-5.1 .. startrev: aa60332382a1 +.. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046 From pypy.commits at gmail.com Fri Apr 22 08:01:15 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 22 Apr 2016 05:01:15 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: add PyBytesArrayObject, based on PyStringObject. 
Still not clear how to make it mutable Message-ID: <571a128b.aa5ec20a.16ccd.ffff9a41@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83827:38e54c1f208b Date: 2016-04-22 14:58 +0300 http://bitbucket.org/pypy/pypy/changeset/38e54c1f208b/ Log: add PyBytesArrayObject, based on PyStringObject. Still not clear how to make it mutable diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -35,6 +35,7 @@ import pypy.module.cpyext.typeobject import pypy.module.cpyext.object import pypy.module.cpyext.bytesobject +import pypy.module.cpyext.bytearrayobject import pypy.module.cpyext.tupleobject import pypy.module.cpyext.setobject import pypy.module.cpyext.dictobject diff --git a/pypy/module/cpyext/bytearrayobject.py b/pypy/module/cpyext/bytearrayobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/bytearrayobject.py @@ -0,0 +1,120 @@ +from pypy.interpreter.error import OperationError, oefmt +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, cpython_struct, bootstrap_function, build_type_checkers, + PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) +from pypy.module.cpyext.pyerrors import PyErr_BadArgument +from pypy.module.cpyext.pyobject import ( + PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr, Py_IncRef) +# Type PyByteArrayObject represents a mutable array of bytes. +# The Python API is that of a sequence; +# the bytes are mapped to ints in [0, 256). +# Bytes are not characters; they may be used to encode characters. +# The only way to go between bytes and str/unicode is via encoding +# and decoding. +# For the convenience of C programmers, the bytes type is considered +# to contain a char pointer, not an unsigned char pointer. 
+ +# XXX Since the ob_bytes is mutable, we must reflect the buffer back +# into the W_ByteArray object at each call to from_ref and each call to +# exported functions + +PyByteArrayObjectStruct = lltype.ForwardReference() +PyByteArrayObject = lltype.Ptr(PyByteArrayObjectStruct) +PyByteArrayObjectFields = PyVarObjectFields + \ + (("ob_exports", rffi.INT), ("ob_alloc", rffi.LONG), ("ob_bytes", rffi.CCHARP)) +cpython_struct("PyByteArrayObject", PyByteArrayObjectFields, PyByteArrayObjectStruct) + + at bootstrap_function +def init_bytearrayobject(space): + "Type description of PyByteArrayObject" + make_typedescr(space.w_str.layout.typedef, + basestruct=PyByteArrayObject.TO, + attach=bytearray_attach, + dealloc=bytearray_dealloc, + realize=bytearray_realize) + +PyByteArray_Check, PyByteArray_CheckExact = build_type_checkers("ByteArray", "w_bytearray") + +def bytearray_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyByteArrayObject with the given bytearray object + """ + py_ba = rffi.cast(PyByteArrayObject, py_obj) + py_ba.c_ob_size = len(space.str_w(w_obj)) + py_ba.c_ob_bytes = lltype.nullptr(rffi.CCHARP.TO) + py_ba.c_ob_exports = rffi.cast(rffi.INT, 0) + +def bytearray_realize(space, py_obj): + """ + Creates the bytearray in the interpreter. + """ + py_ba = rffi.cast(PyByteArrayObject, py_obj) + if not py_ba.c_ob_bytes: + py_ba.c_buffer = lltype.malloc(rffi.CCHARP.TO, py_ba.c_ob_size + 1, + flavor='raw', zero=True) + s = rffi.charpsize2str(py_ba.c_ob_bytes, py_ba.c_ob_size) + w_obj = space.wrap(s) + py_ba.c_ob_exports = rffi.cast(rffi.INT, 0) + track_reference(space, py_obj, w_obj) + return w_obj + + at cpython_api([PyObject], lltype.Void, header=None) +def bytearray_dealloc(space, py_obj): + """Frees allocated PyByteArrayObject resources. 
+ """ + py_ba = rffi.cast(PyByteArrayObject, py_obj) + if py_ba.c_ob_bytes: + lltype.free(py_ba.c_ob_bytes, flavor="raw") + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +#_______________________________________________________________________ + + at cpython_api([PyObject], PyObject) +def PyByteArray_FromObject(space, o): + """Return a new bytearray object from any object, o, that implements the + buffer protocol. + + XXX expand about the buffer protocol, at least somewhere""" + raise NotImplementedError + + at cpython_api([rffi.CCHARP, Py_ssize_t], PyObject) +def PyByteArray_FromStringAndSize(space, string, len): + """Create a new bytearray object from string and its length, len. On + failure, NULL is returned.""" + raise NotImplementedError + + at cpython_api([PyObject, PyObject], PyObject) +def PyByteArray_Concat(space, a, b): + """Concat bytearrays a and b and return a new bytearray with the result.""" + raise NotImplementedError + + at cpython_api([PyObject], Py_ssize_t, error=-1) +def PyByteArray_Size(space, bytearray): + """Return the size of bytearray after checking for a NULL pointer.""" + raise NotImplementedError + + at cpython_api([PyObject], rffi.CCHARP) +def PyByteArray_AsString(space, bytearray): + """Return the contents of bytearray as a char array after checking for a + NULL pointer.""" + raise NotImplementedError + + at cpython_api([PyObject, Py_ssize_t], rffi.INT_real, error=-1) +def PyByteArray_Resize(space, bytearray, len): + """Resize the internal buffer of bytearray to len.""" + raise NotImplementedError + + at cpython_api([PyObject], rffi.CCHARP) +def PyByteArray_AS_STRING(space, bytearray): + """Macro version of PyByteArray_AsString().""" + raise NotImplementedError + + at cpython_api([PyObject], Py_ssize_t, error=-1) +def PyByteArray_GET_SIZE(space, bytearray): + """Macro version of PyByteArray_Size().""" + raise NotImplementedError + + diff --git a/pypy/module/cpyext/include/Python.h 
b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -106,6 +106,7 @@ #include "pythonrun.h" #include "pyerrors.h" #include "sysmodule.h" +#include "bytearrayobject.h" #include "stringobject.h" #include "descrobject.h" #include "tupleobject.h" diff --git a/pypy/module/cpyext/include/bytearrayobject.h b/pypy/module/cpyext/include/bytearrayobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/bytearrayobject.h @@ -0,0 +1,33 @@ +/* ByteArray object interface */ + +#ifndef Py_BYTEARRAYOBJECT_H +#define Py_BYTEARRAYOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* Type PyByteArrayObject represents a mutable array of bytes. + * The Python API is that of a sequence; + * the bytes are mapped to ints in [0, 256). + * Bytes are not characters; they may be used to encode characters. + * The only way to go between bytes and str/unicode is via encoding + * and decoding. + * For the convenience of C programmers, the bytes type is considered + * to contain a char pointer, not an unsigned char pointer. + */ + +/* Object layout */ +typedef struct { + PyObject_VAR_HEAD + /* XXX(nnorwitz): should ob_exports be Py_ssize_t? 
*/ + int ob_exports; /* how many buffer exports */ + Py_ssize_t ob_alloc; /* How many bytes allocated */ + char *ob_bytes; +} PyByteArrayObject; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_BYTEARRAYOBJECT_H */ diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -59,63 +59,6 @@ raise NotImplementedError @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyByteArray_Check(space, o): - """Return true if the object o is a bytearray object or an instance of a - subtype of the bytearray type.""" - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyByteArray_CheckExact(space, o): - """Return true if the object o is a bytearray object, but not an instance of a - subtype of the bytearray type.""" - raise NotImplementedError - - at cpython_api([PyObject], PyObject) -def PyByteArray_FromObject(space, o): - """Return a new bytearray object from any object, o, that implements the - buffer protocol. - - XXX expand about the buffer protocol, at least somewhere""" - raise NotImplementedError - - at cpython_api([rffi.CCHARP, Py_ssize_t], PyObject) -def PyByteArray_FromStringAndSize(space, string, len): - """Create a new bytearray object from string and its length, len. 
On - failure, NULL is returned.""" - raise NotImplementedError - - at cpython_api([PyObject, PyObject], PyObject) -def PyByteArray_Concat(space, a, b): - """Concat bytearrays a and b and return a new bytearray with the result.""" - raise NotImplementedError - - at cpython_api([PyObject], Py_ssize_t, error=-1) -def PyByteArray_Size(space, bytearray): - """Return the size of bytearray after checking for a NULL pointer.""" - raise NotImplementedError - - at cpython_api([PyObject], rffi.CCHARP) -def PyByteArray_AsString(space, bytearray): - """Return the contents of bytearray as a char array after checking for a - NULL pointer.""" - raise NotImplementedError - - at cpython_api([PyObject, Py_ssize_t], rffi.INT_real, error=-1) -def PyByteArray_Resize(space, bytearray, len): - """Resize the internal buffer of bytearray to len.""" - raise NotImplementedError - - at cpython_api([PyObject], rffi.CCHARP) -def PyByteArray_AS_STRING(space, bytearray): - """Macro version of PyByteArray_AsString().""" - raise NotImplementedError - - at cpython_api([PyObject], Py_ssize_t, error=-1) -def PyByteArray_GET_SIZE(space, bytearray): - """Macro version of PyByteArray_Size().""" - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def PyCell_Check(space, ob): """Return true if ob is a cell object; ob must not be NULL.""" raise NotImplementedError From pypy.commits at gmail.com Fri Apr 22 08:01:17 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 22 Apr 2016 05:01:17 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: remove unused imports Message-ID: <571a128d.8673c20a.f7427.ffff9555@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83828:556f7a46c787 Date: 2016-04-22 15:00 +0300 http://bitbucket.org/pypy/pypy/changeset/556f7a46c787/ Log: remove unused imports diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -2,11 
+2,11 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, - PyObjectFields, PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) + PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) from pypy.module.cpyext.pyerrors import PyErr_BadArgument from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, - make_typedescr, get_typedescr, as_pyobj, Py_IncRef) + make_typedescr, get_typedescr, Py_IncRef) ## ## Implementation of PyStringObject From pypy.commits at gmail.com Fri Apr 22 11:19:04 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 22 Apr 2016 08:19:04 -0700 (PDT) Subject: [pypy-commit] pypy default: ooops, thanks amaury for noticing that PyPy was still using the Message-ID: <571a40e8.26b0c20a.18b87.ffffea40@mx.google.com> Author: Armin Rigo Branch: Changeset: r83829:37479cd984a8 Date: 2016-04-22 17:18 +0200 http://bitbucket.org/pypy/pypy/changeset/37479cd984a8/ Log: ooops, thanks amaury for noticing that PyPy was still using the fallback implementation from cffi/gc_weakref.py diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -46,6 +46,7 @@ '_get_types': 'func._get_types', '_get_common_types': 'func._get_common_types', 'from_buffer': 'func.from_buffer', + 'gcp': 'func.gcp', 'string': 'func.string', 'unpack': 'func.unpack', From pypy.commits at gmail.com Fri Apr 22 12:29:30 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 22 Apr 2016 09:29:30 -0700 (PDT) Subject: [pypy-commit] pypy default: port cffi/fce0884a0ff6 Message-ID: <571a516a.519d1c0a.2fe61.5a9e@mx.google.com> Author: Armin Rigo Branch: Changeset: r83830:357a144f9ea1 Date: 2016-04-22 18:29 +0200 http://bitbucket.org/pypy/pypy/changeset/357a144f9ea1/ Log: port cffi/fce0884a0ff6 diff --git 
a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -1773,14 +1773,14 @@ def test_introspect_order(self): ffi, lib = self.prepare(""" - union aaa { int a; }; typedef struct ccc { int a; } b; - union g { int a; }; typedef struct cc { int a; } bbb; - union aa { int a; }; typedef struct a { int a; } bb; + union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb; + union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb; + union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb; """, "test_introspect_order", """ - union aaa { int a; }; typedef struct ccc { int a; } b; - union g { int a; }; typedef struct cc { int a; } bbb; - union aa { int a; }; typedef struct a { int a; } bb; + union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb; + union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb; + union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb; """) - assert ffi.list_types() == (['b', 'bb', 'bbb'], - ['a', 'cc', 'ccc'], - ['aa', 'aaa', 'g']) + assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'], + ['CFFIa', 'CFFIcc', 'CFFIccc'], + ['CFFIaa', 'CFFIaaa', 'CFFIg']) From pypy.commits at gmail.com Fri Apr 22 12:50:55 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 22 Apr 2016 09:50:55 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <571a566f.d5da1c0a.89c17.606f@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r733:0d9c0fe8d6b1 Date: 2016-04-22 18:51 +0200 http://bitbucket.org/pypy/pypy.org/changeset/0d9c0fe8d6b1/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $63421 of $105000 (60.4%) + $63553 of $105000 (60.5%)
    @@ -23,7 +23,7 @@
  • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $30534 of $80000 (38.2%) + $30620 of $80000 (38.3%)
    @@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Sat Apr 23 02:41:47 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 22 Apr 2016 23:41:47 -0700 (PDT) Subject: [pypy-commit] pypy default: Issue #2280: syslog(u'foo') Message-ID: <571b192b.a1ccc20a.cadb8.fffff38d@mx.google.com> Author: Armin Rigo Branch: Changeset: r83831:063c9eb15e03 Date: 2016-04-23 08:41 +0200 http://bitbucket.org/pypy/pypy/changeset/063c9eb15e03/ Log: Issue #2280: syslog(u'foo') diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -51,6 +51,8 @@ # if log is not opened, open it now if not _S_log_open: openlog() + if isinstance(message, unicode): + message = str(message) lib.syslog(priority, "%s", message) @builtinify From pypy.commits at gmail.com Sat Apr 23 03:19:31 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 23 Apr 2016 00:19:31 -0700 (PDT) Subject: [pypy-commit] pypy default: Change the repr of 'lib.func' to include the full signature Message-ID: <571b2203.d1981c0a.4053e.0a4c@mx.google.com> Author: Armin Rigo Branch: Changeset: r83832:7e2e9ca21aac Date: 2016-04-23 09:19 +0200 http://bitbucket.org/pypy/pypy/changeset/7e2e9ca21aac/ Log: Change the repr of 'lib.func' to include the full signature diff --git a/pypy/module/_cffi_backend/wrapper.py b/pypy/module/_cffi_backend/wrapper.py --- a/pypy/module/_cffi_backend/wrapper.py +++ b/pypy/module/_cffi_backend/wrapper.py @@ -92,7 +92,8 @@ return ctype._call(self.fnptr, args_w) def descr_repr(self, space): - return space.wrap("" % (self.fnname,)) + doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname) + return space.wrap("" % (doc,)) def descr_get_doc(self, space): doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname) From pypy.commits at gmail.com Sat Apr 23 04:42:22 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 23 Apr 2016 01:42:22 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: remove the withmapdict option and turn it on by default Message-ID: 
<571b356e.143f1c0a.d2220.5c38@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83833:48959a7aa4db Date: 2016-04-22 11:37 +0300 http://bitbucket.org/pypy/pypy/changeset/48959a7aa4db/ Log: remove the withmapdict option and turn it on by default there is still some cleanup needed in get_unique_interplevel_subclass diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -218,10 +218,6 @@ default=False, requires=[("objspace.honor__builtins__", False)]), - BoolOption("withmapdict", - "make instances really small but slow without the JIT", - default=False), - BoolOption("withliststrategies", "enable optimized ways to store lists of primitives ", default=True), @@ -277,14 +273,12 @@ config.objspace.std.suggest(withprebuiltint=True) config.objspace.std.suggest(withliststrategies=True) config.objspace.std.suggest(sharesmallstr=True) - config.objspace.std.suggest(withmapdict=True) if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) - config.objspace.std.suggest(withmapdict=True) def enable_allworkingmodules(config): diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -11,12 +11,6 @@ assert conf.objspace.usemodules.gc - conf.objspace.std.withmapdict = True - assert conf.objspace.std.withtypeversion - conf = get_pypy_config() - conf.objspace.std.withtypeversion = False - py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") - def test_conflicting_gcrootfinder(): conf = get_pypy_config() conf.translation.gc = "boehm" @@ -47,10 +41,10 @@ def test_set_pypy_opt_level(): conf = get_pypy_config() set_pypy_opt_level(conf, '2') - assert conf.objspace.std.getattributeshortcut + assert conf.objspace.std.intshortcut conf = 
get_pypy_config() set_pypy_opt_level(conf, '0') - assert not conf.objspace.std.getattributeshortcut + assert not conf.objspace.std.intshortcut def test_check_documentation(): def check_file_exists(fn): diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmapdict.txt +++ /dev/null @@ -1,5 +0,0 @@ -Enable the new version of "sharing dictionaries". - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -114,6 +114,7 @@ e.write_unraisable(self.space, "new_code_hook()") def _initialize(self): + from pypy.objspace.std.mapdict import init_mapdict_cache if self.co_cellvars: argcount = self.co_argcount assert argcount >= 0 # annotator hint @@ -149,9 +150,7 @@ self._compute_flatcall() - if self.space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import init_mapdict_cache - init_mapdict_cache(self) + init_mapdict_cache(self) def _init_ready(self): "This is a hook for the vmprof module, which overrides this method." 
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -951,8 +951,7 @@ def LOAD_ATTR(self, nameindex, next_instr): "obj.attributename" w_obj = self.popvalue() - if (self.space.config.objspace.std.withmapdict - and not jit.we_are_jitted()): + if not jit.we_are_jitted(): from pypy.objspace.std.mapdict import LOAD_ATTR_caching w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex) else: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -160,7 +160,7 @@ typedef = cls.typedef if wants_dict and typedef.hasdict: wants_dict = False - if config.objspace.std.withmapdict and not typedef.hasdict: + if not typedef.hasdict: # mapdict only works if the type does not already have a dict if wants_del: parentcls = get_unique_interplevel_subclass(config, cls, True, True, @@ -226,7 +226,7 @@ value = func_with_new_name(value, value.func_name) body[key] = value - if (config.objspace.std.withmapdict and "dict" in features): + if "dict" in features: from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin add(BaseMapdictObject) add(ObjectMixin) @@ -300,24 +300,6 @@ return self.slots_w[index] add(Proto) - if "dict" in features: - base_user_setup = supercls.user_setup.im_func - if "user_setup" in body: - base_user_setup = body["user_setup"] - class Proto(object): - def getdict(self, space): - return self.w__dict__ - - def setdict(self, space, w_dict): - self.w__dict__ = check_new_dictionary(space, w_dict) - - def user_setup(self, space, w_subtype): - self.w__dict__ = space.newdict( - instance=True) - base_user_setup(self, space, w_subtype) - - add(Proto) - subcls = type(name, (supercls,), body) _allusersubcls_cache[subcls] = True return subcls diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- 
a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -748,10 +748,6 @@ raises(TypeError, delattr, A(), 42) -class AppTestGetattrWithGetAttributeShortcut(AppTestGetattr): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - class TestInternal: def test_execfile(self, space): fn = str(udir.join('test_execfile')) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1118,8 +1118,7 @@ assert getattr(c, u"x") == 1 -class AppTestOldStyleMapDict(AppTestOldstyle): - spaceconfig = {"objspace.std.withmapdict": True} +class AppTestOldStyleMapDict: def setup_class(cls): if cls.runappdirect: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -110,9 +110,8 @@ 'interp_magic.method_cache_counter') self.extra_interpdef('reset_method_cache_counter', 'interp_magic.reset_method_cache_counter') - if self.space.config.objspace.std.withmapdict: - self.extra_interpdef('mapdict_cache_counter', - 'interp_magic.mapdict_cache_counter') + self.extra_interpdef('mapdict_cache_counter', + 'interp_magic.mapdict_cache_counter') PYC_MAGIC = get_pyc_magic(self.space) self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC) try: diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -37,17 +37,15 @@ cache = space.fromcache(MethodCache) cache.misses = {} cache.hits = {} - if space.config.objspace.std.withmapdict: - cache = space.fromcache(MapAttrCache) - cache.misses = {} - cache.hits = {} + cache = space.fromcache(MapAttrCache) + cache.misses = {} + cache.hits = {} @unwrap_spec(name=str) def mapdict_cache_counter(space, name): """Return a tuple 
(index_cache_hits, index_cache_misses) for lookups in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter - assert space.config.objspace.std.withmapdict cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -6,14 +6,14 @@ @unwrap_spec(generation=int) def collect(space, generation=0): "Run a full collection. The optional argument is ignored." - # First clear the method cache. See test_gc for an example of why. + # First clear the method and the map cache. + # See test_gc for an example of why. from pypy.objspace.std.typeobject import MethodCache + from pypy.objspace.std.mapdict import MapAttrCache cache = space.fromcache(MethodCache) cache.clear() - if space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import MapAttrCache - cache = space.fromcache(MapAttrCache) - cache.clear() + cache = space.fromcache(MapAttrCache) + cache.clear() rgc.collect() return space.wrap(0) diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py --- a/pypy/module/gc/test/test_gc.py +++ b/pypy/module/gc/test/test_gc.py @@ -106,7 +106,6 @@ class AppTestGcMethodCache(object): - spaceconfig = {"objspace.std.withmethodcache": True} def test_clear_method_cache(self): import gc, weakref @@ -127,10 +126,6 @@ assert r() is None -class AppTestGcMapDictIndexCache(AppTestGcMethodCache): - spaceconfig = {"objspace.std.withmethodcache": True, - "objspace.std.withmapdict": True} - def test_clear_index_cache(self): import gc, weakref rlist = [] diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -33,7 +33,7 @@ space = f.space w_obj = f.popvalue() - if 
space.config.objspace.std.withmapdict and not jit.we_are_jitted(): + if not jit.we_are_jitted(): # mapdict has an extra-fast version of this function if LOOKUP_METHOD_mapdict(f, nameindex, w_obj): return @@ -59,8 +59,7 @@ # nothing in the instance f.pushvalue(w_descr) f.pushvalue(w_obj) - if (space.config.objspace.std.withmapdict and - not jit.we_are_jitted()): + if not jit.we_are_jitted(): # let mapdict cache stuff LOOKUP_METHOD_mapdict_fill_cache_method( space, f.getcode(), name, nameindex, w_obj, w_type) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -66,7 +66,7 @@ w_obj = space.allocate_instance(W_ModuleDictObject, space.w_dict) W_ModuleDictObject.__init__(w_obj, space, strategy, storage) return w_obj - elif space.config.objspace.std.withmapdict and instance: + elif instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) elif instance or strdict or module: diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -583,7 +583,6 @@ pass # mainly for tests def get_subclass_of_correct_size(space, cls, w_type): - assert space.config.objspace.std.withmapdict map = w_type.terminator classes = memo_get_subclass_of_correct_size(space, cls) if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -356,8 +356,7 @@ if cls.typedef.applevel_subclasses_base is not None: cls = cls.typedef.applevel_subclasses_base # - if (self.config.objspace.std.withmapdict and cls is W_ObjectObject - and not w_subtype.needsdel): + if cls is W_ObjectObject and not w_subtype.needsdel: from pypy.objspace.std.mapdict import get_subclass_of_correct_size subcls = 
get_subclass_of_correct_size(self, cls, w_subtype) else: diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1114,7 +1114,6 @@ class objspace: class std: withcelldict = False - withmapdict = False methodcachesizeexp = 11 withmethodcachecounter = False diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -5,7 +5,6 @@ class objspace: class std: withcelldict = False - withmapdict = True methodcachesizeexp = 11 withmethodcachecounter = False @@ -645,7 +644,6 @@ # XXX write more class AppTestWithMapDict(object): - spaceconfig = {"objspace.std.withmapdict": True} def test_simple(self): class A(object): @@ -862,8 +860,7 @@ class AppTestWithMapDictAndCounters(object): - spaceconfig = {"objspace.std.withmapdict": True, - "objspace.std.withmethodcachecounter": True} + spaceconfig = {"objspace.std.withmethodcachecounter": True} def setup_class(cls): from pypy.interpreter import gateway @@ -1206,8 +1203,7 @@ assert got == 'd' class AppTestGlobalCaching(AppTestWithMapDict): - spaceconfig = {"objspace.std.withmethodcachecounter": True, - "objspace.std.withmapdict": True} + spaceconfig = {"objspace.std.withmethodcachecounter": True} def test_mix_classes(self): import __pypy__ @@ -1264,8 +1260,7 @@ assert 0, "failed: got %r" % ([got[1] for got in seen],) class TestDictSubclassShortcutBug(object): - spaceconfig = {"objspace.std.withmapdict": True, - "objspace.std.withmethodcachecounter": True} + spaceconfig = {"objspace.std.withmethodcachecounter": True} def test_bug(self): w_dict = self.space.appexec([], """(): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -176,12 +176,11 @@ # 
dict_w of any of the types in the mro changes, or if the mro # itself changes w_self._version_tag = VersionTag() - if space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import DictTerminator, NoDictTerminator - if w_self.hasdict: - w_self.terminator = DictTerminator(space, w_self) - else: - w_self.terminator = NoDictTerminator(space, w_self) + from pypy.objspace.std.mapdict import DictTerminator, NoDictTerminator + if w_self.hasdict: + w_self.terminator = DictTerminator(space, w_self) + else: + w_self.terminator = NoDictTerminator(space, w_self) def __repr__(self): "NOT_RPYTHON" From pypy.commits at gmail.com Sat Apr 23 04:42:24 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 23 Apr 2016 01:42:24 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: use mapdict for all the subclassing Message-ID: <571b3570.8a9d1c0a.aa077.59ac@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83834:311cb478ad96 Date: 2016-04-23 11:41 +0300 http://bitbucket.org/pypy/pypy/changeset/311cb478ad96/ Log: use mapdict for all the subclassing replace a huge mess by a different kind of (smaller) mess diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -98,175 +98,51 @@ # reason is that it is missing a place to store the __dict__, the slots, # the weakref lifeline, and it typically has no interp-level __del__. # So we create a few interp-level subclasses of W_XxxObject, which add -# some combination of features. -# -# We don't build 2**4 == 16 subclasses for all combinations of requested -# features, but limit ourselves to 6, chosen a bit arbitrarily based on -# typical usage (case 1 is the most common kind of app-level subclasses; -# case 2 is the memory-saving kind defined with __slots__). -# -# +----------------------------------------------------------------+ -# | NOTE: if withmapdict is enabled, the following doesn't apply! 
| -# | Map dicts can flexibly allow any slots/__dict__/__weakref__ to | -# | show up only when needed. In particular there is no way with | -# | mapdict to prevent some objects from being weakrefable. | -# +----------------------------------------------------------------+ -# -# dict slots del weakrefable -# -# 1. Y N N Y UserDictWeakref -# 2. N Y N N UserSlots -# 3. Y Y N Y UserDictWeakrefSlots -# 4. N Y N Y UserSlotsWeakref -# 5. Y Y Y Y UserDictWeakrefSlotsDel -# 6. N Y Y Y UserSlotsWeakrefDel -# -# Note that if the app-level explicitly requests no dict, we should not -# provide one, otherwise storing random attributes on the app-level -# instance would unexpectedly work. We don't care too much, though, if -# an object is weakrefable when it shouldn't really be. It's important -# that it has a __del__ only if absolutely needed, as this kills the -# performance of the GCs. -# -# Interp-level inheritance is like this: -# -# W_XxxObject base -# / \ -# 1 2 -# / \ -# 3 4 -# / \ -# 5 6 +# some combination of features. This is done using mapdict. -def get_unique_interplevel_subclass(config, cls, hasdict, wants_slots, - needsdel=False, weakrefable=False): +# we need two subclasses of the app-level type, one to add mapdict, and then one +# to add del to not slow down the GC. 
+ +def get_unique_interplevel_subclass(config, cls, needsdel=False): "NOT_RPYTHON: initialization-time only" if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False): needsdel = False assert cls.typedef.acceptable_as_base_class - key = config, cls, hasdict, wants_slots, needsdel, weakrefable + key = config, cls, needsdel try: return _subclass_cache[key] except KeyError: - subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel, - weakrefable) + # XXX can save a class if cls already has a __del__ + if needsdel: + cls = get_unique_interplevel_subclass(config, cls, False) + subcls = _getusercls(config, cls, needsdel) assert key not in _subclass_cache _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable): +def _getusercls(config, cls, wants_del, reallywantdict=False): + from rpython.rlib import objectmodel + from pypy.objspace.std.mapdict import (BaseUserClassMapdict, + MapdictDictSupport, MapdictWeakrefSupport, + _make_storage_mixin_size_n) typedef = cls.typedef - if wants_dict and typedef.hasdict: - wants_dict = False - if not typedef.hasdict: - # mapdict only works if the type does not already have a dict - if wants_del: - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - return _usersubclswithfeature(config, cls, "user", "dict", "weakref", "slots") - # Forest of if's - see the comment above. 
+ name = cls.__name__ + "User" + + mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()] + if reallywantdict or not typedef.hasdict: + # the type has no dict, mapdict to provide the dict + mixins_needed.append(MapdictDictSupport) + name += "Dict" + if not typedef.weakrefable: + # the type does not support weakrefs yet, mapdict to provide weakref + # support + mixins_needed.append(MapdictWeakrefSupport) + name += "Weakrefable" if wants_del: - if wants_dict: - # case 5. Parent class is 3. - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - else: - # case 6. Parent class is 4. - parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - elif wants_dict: - if wants_slots: - # case 3. Parent class is 1. - parentcls = get_unique_interplevel_subclass(config, cls, True, False, - False, True) - return _usersubclswithfeature(config, parentcls, "slots") - else: - # case 1 (we need to add weakrefable unless it's already in 'cls') - if not typedef.weakrefable: - return _usersubclswithfeature(config, cls, "user", "dict", "weakref") - else: - return _usersubclswithfeature(config, cls, "user", "dict") - else: - if weakrefable and not typedef.weakrefable: - # case 4. Parent class is 2. 
- parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, False) - return _usersubclswithfeature(config, parentcls, "weakref") - else: - # case 2 (if the base is already weakrefable, case 2 == case 4) - return _usersubclswithfeature(config, cls, "user", "slots") - -def _usersubclswithfeature(config, parentcls, *features): - key = config, parentcls, features - try: - return _usersubclswithfeature_cache[key] - except KeyError: - subcls = _builduserclswithfeature(config, parentcls, *features) - _usersubclswithfeature_cache[key] = subcls - return subcls -_usersubclswithfeature_cache = {} -_allusersubcls_cache = {} - -def _builduserclswithfeature(config, supercls, *features): - "NOT_RPYTHON: initialization-time only" - name = supercls.__name__ - name += ''.join([name.capitalize() for name in features]) - body = {} - #print '..........', name, '(', supercls.__name__, ')' - - def add(Proto): - for key, value in Proto.__dict__.items(): - if (not key.startswith('__') and not key.startswith('_mixin_') - or key == '__del__'): - if hasattr(value, "func_name"): - value = func_with_new_name(value, value.func_name) - body[key] = value - - if "dict" in features: - from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin - add(BaseMapdictObject) - add(ObjectMixin) - body["user_overridden_class"] = True - features = () - - if "user" in features: # generic feature needed by all subcls - - class Proto(object): - user_overridden_class = True - - def getclass(self, space): - return promote(self.w__class__) - - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - - def user_setup(self, space, w_subtype): - self.space = space - self.w__class__ = w_subtype - self.user_setup_slots(w_subtype.layout.nslots) - - def user_setup_slots(self, nslots): - assert nslots == 0 - add(Proto) - - if "weakref" in features: - class Proto(object): - _lifeline_ = None - def getweakref(self): - return self._lifeline_ - def 
setweakref(self, space, weakreflifeline): - self._lifeline_ = weakreflifeline - def delweakref(self): - self._lifeline_ = None - add(Proto) - - if "del" in features: - parent_destructor = getattr(supercls, '__del__', None) + name += "Del" + parent_destructor = getattr(cls, '__del__', None) def call_parent_del(self): assert isinstance(self, subcls) parent_destructor(self) @@ -281,39 +157,15 @@ if parent_destructor is not None: self.enqueue_for_destruction(self.space, call_parent_del, 'internal destructor of ') - add(Proto) + mixins_needed.append(Proto) - if "slots" in features: - class Proto(object): - slots_w = [] - def user_setup_slots(self, nslots): - if nslots > 0: - self.slots_w = [None] * nslots - def setslotvalue(self, index, w_value): - self.slots_w[index] = w_value - def delslotvalue(self, index): - if self.slots_w[index] is None: - return False - self.slots_w[index] = None - return True - def getslotvalue(self, index): - return self.slots_w[index] - add(Proto) - - subcls = type(name, (supercls,), body) - _allusersubcls_cache[subcls] = True + class subcls(cls): + user_overridden_class = True + for base in mixins_needed: + objectmodel.import_from_mixin(base) + subcls.__name__ = name return subcls -# a couple of helpers for the Proto classes above, factored out to reduce -# the translated code size -def check_new_dictionary(space, w_dict): - if not space.isinstance_w(w_dict, space.w_dict): - raise OperationError(space.w_TypeError, - space.wrap("setting dictionary to a non-dict")) - from pypy.objspace.std import dictmultiobject - assert isinstance(w_dict, dictmultiobject.W_DictMultiObject) - return w_dict -check_new_dictionary._dont_inline_ = True # ____________________________________________________________ diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -185,12 +185,11 @@ class Cache: def __init__(self, 
space): - from pypy.interpreter.typedef import _usersubclswithfeature - # evil - self.cls_without_del = _usersubclswithfeature( - space.config, W_InstanceObject, "dict", "weakref") - self.cls_with_del = _usersubclswithfeature( - space.config, self.cls_without_del, "del") + from pypy.interpreter.typedef import _getusercls + self.cls_without_del = _getusercls( + space.config, W_InstanceObject, False, reallywantdict=True) + self.cls_with_del = _getusercls( + space.config, W_InstanceObject, True, reallywantdict=True) def class_descr_call(space, w_self, __args__): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -450,12 +450,19 @@ INVALID = 2 SLOTS_STARTING_FROM = 3 +# a little bit of a mess of mixin classes that implement various pieces of +# objspace user object functionality in terms of mapdict -class BaseMapdictObject: - _mixin_ = True +class BaseUserClassMapdict: + # everything that's needed to use mapdict for a user subclass at all. + # This immediately makes slots possible. 
- def _init_empty(self, map): - raise NotImplementedError("abstract base class") + # assumes presence of _init_empty, _mapdict_read_storage, + # _mapdict_write_storage, _mapdict_storage_length, + # _set_mapdict_storage_and_map + + # _____________________________________________ + # methods needed for mapdict def _become(self, new_obj): self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) @@ -464,49 +471,11 @@ return jit.promote(self.map) def _set_mapdict_map(self, map): self.map = map + # _____________________________________________ # objspace interface - def getdictvalue(self, space, attrname): - return self._get_mapdict_map().read(self, attrname, DICT) - - def setdictvalue(self, space, attrname, w_value): - return self._get_mapdict_map().write(self, attrname, DICT, w_value) - - def deldictvalue(self, space, attrname): - new_obj = self._get_mapdict_map().delete(self, attrname, DICT) - if new_obj is None: - return False - self._become(new_obj) - return True - - def getdict(self, space): - w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) - if w_dict is not None: - assert isinstance(w_dict, W_DictMultiObject) - return w_dict - - strategy = space.fromcache(MapDictStrategy) - storage = strategy.erase(self) - w_dict = W_DictObject(space, strategy, storage) - flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) - assert flag - return w_dict - - def setdict(self, space, w_dict): - from pypy.interpreter.typedef import check_new_dictionary - w_dict = check_new_dictionary(space, w_dict) - w_olddict = self.getdict(space) - assert isinstance(w_dict, W_DictMultiObject) - # The old dict has got 'self' as dstorage, but we are about to - # change self's ("dict", SPECIAL) attribute to point to the - # new dict. If the old dict was using the MapDictStrategy, we - # have to force it now: otherwise it would remain an empty - # shell that continues to delegate to 'self'. 
- if type(w_olddict.get_strategy()) is MapDictStrategy: - w_olddict.get_strategy().switch_to_object_strategy(w_olddict) - flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) - assert flag + # class access def getclass(self, space): return self._get_mapdict_map().terminator.w_cls @@ -519,9 +488,13 @@ from pypy.module.__builtin__.interp_classobj import W_InstanceObject self.space = space assert (not self.typedef.hasdict or + isinstance(w_subtype.terminator, NoDictTerminator) or self.typedef is W_InstanceObject.typedef) self._init_empty(w_subtype.terminator) + + # methods needed for slots + def getslotvalue(self, slotindex): index = SLOTS_STARTING_FROM + slotindex return self._get_mapdict_map().read(self, "slot", index) @@ -538,7 +511,9 @@ self._become(new_obj) return True - # used by _weakref implemenation + +class MapdictWeakrefSupport(object): + # stuff used by the _weakref implementation def getweakref(self): from pypy.module._weakref.interp__weakref import WeakrefLifeline @@ -559,8 +534,69 @@ self._get_mapdict_map().write(self, "weakref", SPECIAL, None) delweakref._cannot_really_call_random_things_ = True -class ObjectMixin(object): - _mixin_ = True + +class MapdictDictSupport(object): + + # objspace interface for dictionary operations + + def getdictvalue(self, space, attrname): + return self._get_mapdict_map().read(self, attrname, DICT) + + def setdictvalue(self, space, attrname, w_value): + return self._get_mapdict_map().write(self, attrname, DICT, w_value) + + def deldictvalue(self, space, attrname): + new_obj = self._get_mapdict_map().delete(self, attrname, DICT) + if new_obj is None: + return False + self._become(new_obj) + return True + + def getdict(self, space): + return _obj_getdict(self, space) + + def setdict(self, space, w_dict): + _obj_setdict(self, space, w_dict) + +# a couple of helpers for the classes above, factored out to reduce +# the translated code size + + at objectmodel.dont_inline +def _obj_getdict(self, space): + assert 
isinstance(self._get_mapdict_map().terminator, DictTerminator) + w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) + if w_dict is not None: + assert isinstance(w_dict, W_DictMultiObject) + return w_dict + + strategy = space.fromcache(MapDictStrategy) + storage = strategy.erase(self) + w_dict = W_DictObject(space, strategy, storage) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) + assert flag + return w_dict + + at objectmodel.dont_inline +def _obj_setdict(self, space, w_dict): + from pypy.objspace.std import dictmultiobject + assert isinstance(self._get_mapdict_map().terminator, DictTerminator) + if not space.isinstance_w(w_dict, space.w_dict): + raise OperationError(space.w_TypeError, + space.wrap("setting dictionary to a non-dict")) + assert isinstance(w_dict, dictmultiobject.W_DictMultiObject) + w_olddict = self.getdict(space) + assert isinstance(w_dict, W_DictMultiObject) + # The old dict has got 'self' as dstorage, but we are about to + # change self's ("dict", SPECIAL) attribute to point to the + # new dict. If the old dict was using the MapDictStrategy, we + # have to force it now: otherwise it would remain an empty + # shell that continues to delegate to 'self'. 
+ if type(w_olddict.get_strategy()) is MapDictStrategy: + w_olddict.get_strategy().switch_to_object_strategy(w_olddict) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) + assert flag + +class MapdictStorageMixin(object): def _init_empty(self, map): from rpython.rlib.debug import make_sure_not_resized self.map = map @@ -579,50 +615,21 @@ self.storage = storage self.map = map -class Object(ObjectMixin, BaseMapdictObject, W_Root): +class ObjectWithoutDict(MapdictStorageMixin, BaseUserClassMapdict, MapdictWeakrefSupport, W_Root): pass # mainly for tests -def get_subclass_of_correct_size(space, cls, w_type): - map = w_type.terminator - classes = memo_get_subclass_of_correct_size(space, cls) - if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS: - return classes[0] - size = map.size_estimate() - debug.check_nonneg(size) - if size < len(classes): - return classes[size] - else: - return classes[len(classes)-1] -get_subclass_of_correct_size._annspecialcase_ = "specialize:arg(1)" +class Object(MapdictStorageMixin, BaseUserClassMapdict, MapdictDictSupport, MapdictWeakrefSupport, W_Root): + pass # mainly for tests -SUBCLASSES_MIN_FIELDS = 5 # XXX tweak these numbers -SUBCLASSES_MAX_FIELDS = 5 +SUBCLASSES_NUM_FIELDS = 5 -def memo_get_subclass_of_correct_size(space, supercls): - key = space, supercls - try: - return _subclass_cache[key] - except KeyError: - assert not hasattr(supercls, "__del__") - result = [] - for i in range(SUBCLASSES_MIN_FIELDS, SUBCLASSES_MAX_FIELDS+1): - result.append(_make_subclass_size_n(supercls, i)) - for i in range(SUBCLASSES_MIN_FIELDS): - result.insert(0, result[0]) - if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS: - assert len(set(result)) == 1 - _subclass_cache[key] = result - return result -memo_get_subclass_of_correct_size._annspecialcase_ = "specialize:memo" -_subclass_cache = {} - -def _make_subclass_size_n(supercls, n): +def _make_storage_mixin_size_n(n=SUBCLASSES_NUM_FIELDS): from rpython.rlib import unroll rangen = 
unroll.unrolling_iterable(range(n)) nmin1 = n - 1 rangenmin1 = unroll.unrolling_iterable(range(nmin1)) valnmin1 = "_value%s" % nmin1 - class subcls(BaseMapdictObject, supercls): + class subcls(object): def _init_empty(self, map): for i in rangenmin1: setattr(self, "_value%s" % i, None) @@ -690,7 +697,7 @@ erased = erase_list(storage_list) setattr(self, "_value%s" % nmin1, erased) - subcls.__name__ = supercls.__name__ + "Size%s" % n + subcls.__name__ = "Size%s" % n return subcls # ____________________________________________________________ diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -356,14 +356,8 @@ if cls.typedef.applevel_subclasses_base is not None: cls = cls.typedef.applevel_subclasses_base # - if cls is W_ObjectObject and not w_subtype.needsdel: - from pypy.objspace.std.mapdict import get_subclass_of_correct_size - subcls = get_subclass_of_correct_size(self, cls, w_subtype) - else: - subcls = get_unique_interplevel_subclass( - self.config, cls, w_subtype.hasdict, - w_subtype.layout.nslots != 0, - w_subtype.needsdel, w_subtype.weakrefable) + subcls = get_unique_interplevel_subclass( + self.config, cls, w_subtype.needsdel) instance = instantiate(subcls) assert isinstance(instance, cls) instance.user_setup(self, w_subtype) diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -13,7 +13,7 @@ class Class(object): def __init__(self, hasdict=True): - self.hasdict = True + self.hasdict = hasdict if hasdict: self.terminator = DictTerminator(space, self) else: @@ -22,10 +22,17 @@ def instantiate(self, sp=None): if sp is None: sp = space - result = Object() + if self.hasdict: + result = Object() + else: + result = ObjectWithoutDict() result.user_setup(sp, self) return result +class ObjectWithoutDict(ObjectWithoutDict): + class typedef: + 
hasdict = False + class Object(Object): class typedef: hasdict = False @@ -429,6 +436,9 @@ assert obj.getslotvalue(b) == 60 assert obj.storage == [50, 60] assert not obj.setdictvalue(space, "a", 70) + assert obj.getdict(space) is None + assert obj.getdictvalue(space, "a") is None + def test_getdict(): cls = Class() diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -177,7 +177,13 @@ # itself changes w_self._version_tag = VersionTag() from pypy.objspace.std.mapdict import DictTerminator, NoDictTerminator - if w_self.hasdict: + # if the typedef has a dict, then the rpython-class does all the dict + # management, which means from the point of view of mapdict there is no + # dict. However, W_InstanceObjects are an exception to this + from pypy.module.__builtin__.interp_classobj import W_InstanceObject + typedef = w_self.layout.typedef + if (w_self.hasdict and not typedef.hasdict or + typedef is W_InstanceObject.typedef): w_self.terminator = DictTerminator(space, w_self) else: w_self.terminator = NoDictTerminator(space, w_self) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -211,6 +211,12 @@ func._always_inline_ = True return func +def dont_inline(func): + """ mark the function as never-to-be-inlined by the RPython optimizations + (not the JIT!), no matter its size.""" + func._dont_inline_ = True + return func + # ____________________________________________________________ From pypy.commits at gmail.com Sat Apr 23 11:39:20 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 23 Apr 2016 08:39:20 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <571b9728.c30a1c0a.5c087.ffffe942@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r734:feb526a4d4e1 Date: 2016-04-23 17:39 +0200 
http://bitbucket.org/pypy/pypy.org/changeset/feb526a4d4e1/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $63553 of $105000 (60.5%) + $63753 of $105000 (60.7%)
    @@ -23,7 +23,7 @@
  • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $30620 of $80000 (38.3%) + $30660 of $80000 (38.3%)
    @@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Sat Apr 23 14:18:36 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 23 Apr 2016 11:18:36 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: problems that the enabling mapdict by default found: Message-ID: <571bbc7c.8344c20a.76697.ffffc8b4@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83835:9df69444009e Date: 2016-04-23 13:37 +0300 http://bitbucket.org/pypy/pypy/changeset/9df69444009e/ Log: problems that the enabling mapdict by default found: - the mapdict cache needed an extra lookup, that is fixed - looking up non-method things via the class is bad diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -23,6 +23,7 @@ def LOOKUP_METHOD(f, nameindex, *ignored): + from pypy.objspace.std.typeobject import MutableCell # stack before after # -------------- --fast-method----fallback-case------------ # @@ -44,7 +45,18 @@ w_type = space.type(w_obj) if w_type.has_object_getattribute(): name = space.str_w(w_name) - w_descr = w_type.lookup(name) + # bit of a mess to use these internal functions, but it allows the + # mapdict caching below to work without an additional lookup + version_tag = w_type.version_tag() + if version_tag is None: + _, w_descr = w_type._lookup_where(name) + w_descr_cell = None + else: + _, w_descr_cell = w_type._pure_lookup_where_possibly_with_method_cache( + name, version_tag) + w_descr = w_descr_cell + if isinstance(w_descr, MutableCell): + w_descr = w_descr.unwrap_cell(space) if w_descr is None: # this handles directly the common case # module.function(args..) 
@@ -62,7 +74,8 @@ if not jit.we_are_jitted(): # let mapdict cache stuff LOOKUP_METHOD_mapdict_fill_cache_method( - space, f.getcode(), name, nameindex, w_obj, w_type) + space, f.getcode(), name, nameindex, w_obj, w_type, + w_descr_cell) return if w_value is None: w_value = space.getattr(w_obj, w_name) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -1011,22 +1011,15 @@ return False def LOOKUP_METHOD_mapdict_fill_cache_method(space, pycode, name, nameindex, - w_obj, w_type): + w_obj, w_type, w_method): + if w_method is None or isinstance(w_method, MutableCell): + # don't cache the MutableCell XXX could be fixed + return version_tag = w_type.version_tag() - if version_tag is None: - return + assert version_tag is not None map = w_obj._get_mapdict_map() if map is None or isinstance(map.terminator, DevolvedDictTerminator): return - # We know here that w_obj.getdictvalue(space, name) just returned None, - # so the 'name' is not in the instance. We repeat the lookup to find it - # in the class, this time taking care of the result: it can be either a - # quasi-constant class attribute, or actually a MutableCell --- which we - # must not cache. (It should not be None here, but you never know...) 
- _, w_method = w_type._pure_lookup_where_possibly_with_method_cache( - name, version_tag) - if w_method is None or isinstance(w_method, MutableCell): - return _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) # XXX fix me: if a function contains a loop with both LOAD_ATTR and diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -202,7 +202,8 @@ l = [type.__getattribute__(A, "__new__")(A)] * 10 __pypy__.reset_method_cache_counter() for i, a in enumerate(l): - assert a.f() == 42 + # use getattr to circumvent the mapdict cache + assert getattr(a, "f")() == 42 cache_counter = __pypy__.method_cache_counter("f") assert sum(cache_counter) == 10 if cache_counter == (9, 1): @@ -225,9 +226,11 @@ assert a.x == i + 1 A.x += 1 cache_counter = __pypy__.method_cache_counter("x") - assert cache_counter[0] >= 350 + # XXX this is the bad case for the mapdict cache: looking up + # non-method attributes from the class + assert cache_counter[0] >= 450 assert cache_counter[1] >= 1 - assert sum(cache_counter) == 400 + assert sum(cache_counter) == 500 __pypy__.reset_method_cache_counter() a = A() From pypy.commits at gmail.com Sat Apr 23 17:38:50 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 23 Apr 2016 14:38:50 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <571beb6a.143f1c0a.d2220.6013@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r735:da20ab795cc8 Date: 2016-04-23 23:39 +0200 http://bitbucket.org/pypy/pypy.org/changeset/da20ab795cc8/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $63753 of $105000 (60.7%) + $63767 of $105000 (60.7%)
    @@ -23,7 +23,7 @@
  • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $30660 of $80000 (38.3%) + $30670 of $80000 (38.3%)
    @@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Sun Apr 24 00:10:00 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 23 Apr 2016 21:10:00 -0700 (PDT) Subject: [pypy-commit] pypy gcheader-decl: GC headers don't need to be in the database Message-ID: <571c4718.143f1c0a.d2220.ffffafc8@mx.google.com> Author: Ronan Lamy Branch: gcheader-decl Changeset: r83836:b05e778c895c Date: 2016-04-24 05:08 +0100 http://bitbucket.org/pypy/pypy/changeset/b05e778c895c/ Log: GC headers don't need to be in the database diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -547,7 +547,6 @@ gct = self.db.gctransformer if gct is not None: self.gc_init = gct.gcheader_initdata(self.obj) - db.getcontainernode(self.gc_init) else: self.gc_init = None @@ -678,7 +677,6 @@ gct = self.db.gctransformer if gct is not None: self.gc_init = gct.gcheader_initdata(self.obj) - db.getcontainernode(self.gc_init) else: self.gc_init = None From pypy.commits at gmail.com Sun Apr 24 06:17:48 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 24 Apr 2016 03:17:48 -0700 (PDT) Subject: [pypy-commit] pypy default: Issue #2226 Message-ID: <571c9d4c.876cc20a.cff5.ffffb83f@mx.google.com> Author: Armin Rigo Branch: Changeset: r83837:490058ea54e6 Date: 2016-04-24 10:52 +0200 http://bitbucket.org/pypy/pypy/changeset/490058ea54e6/ Log: Issue #2226 Another tweak in the incremental GC: this should ensure that progress in the major GC occurs quickly enough in all cases. diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -341,6 +341,20 @@ self.prebuilt_root_objects = self.AddressStack() # self._init_writebarrier_logic() + # + # The size of all the objects turned from 'young' to 'old' + # since we started the last major collection cycle. 
This is + # used to track progress of the incremental GC: normally, we + # run one major GC step after each minor collection, but if a + # lot of objects are made old, we need run two or more steps. + # Otherwise the risk is that we create old objects faster than + # we're collecting them. The 'threshold' is incremented after + # each major GC step at a fixed rate; the idea is that as long + # as 'size_objects_made_old > threshold_objects_made_old' then + # we must do more major GC steps. See major_collection_step() + # for more details. + self.size_objects_made_old = r_uint(0) + self.threshold_objects_made_old = r_uint(0) def setup(self): @@ -464,7 +478,7 @@ self.gc_nursery_debug = True else: self.gc_nursery_debug = False - self.minor_collection() # to empty the nursery + self._minor_collection() # to empty the nursery llarena.arena_free(self.nursery) self.nursery_size = newsize self.allocate_nursery() @@ -509,8 +523,8 @@ self.min_heap_size = max(self.min_heap_size, self.nursery_size * self.major_collection_threshold) # the following two values are usually equal, but during raw mallocs - # of arrays, next_major_collection_threshold is decremented to make - # the next major collection arrive earlier. + # with memory pressure accounting, next_major_collection_threshold + # is decremented to make the next major collection arrive earlier. # See translator/c/test/test_newgc, test_nongc_attached_to_gc self.next_major_collection_initial = self.min_heap_size self.next_major_collection_threshold = self.min_heap_size @@ -700,21 +714,58 @@ def collect(self, gen=2): """Do a minor (gen=0), start a major (gen=1), or do a full major (gen>=2) collection.""" - if gen <= 1: - self.minor_collection() - if gen == 1 or (self.gc_state != STATE_SCANNING and gen != -1): + if gen < 0: + self._minor_collection() # dangerous! 
no major GC cycle progress + elif gen <= 1: + self.minor_collection_with_major_progress() + if gen == 1 and self.gc_state == STATE_SCANNING: self.major_collection_step() else: self.minor_and_major_collection() self.rrc_invoke_callback() + def minor_collection_with_major_progress(self, extrasize=0): + """To a minor collection. Then, if there is already a major GC + in progress, run at least one major collection step. If there is + no major GC but the threshold is reached, start a major GC. + """ + self._minor_collection() + + # If the gc_state is STATE_SCANNING, we're not in the middle + # of an incremental major collection. In that case, wait + # until there is too much garbage before starting the next + # major collection. But if we are in the middle of an + # incremental major collection, then always do (at least) one + # step now. + # + # Within a major collection cycle, every call to + # major_collection_step() increments + # 'threshold_objects_made_old' by nursery_size/2. + + if self.gc_state != STATE_SCANNING or self.threshold_reached(extrasize): + self.major_collection_step(extrasize) + + # See documentation in major_collection_step() + while self.gc_state != STATE_SCANNING: # target (A1) + threshold = self.threshold_objects_made_old + if threshold >= r_uint(extrasize): + threshold -= r_uint(extrasize) + if self.size_objects_made_old <= threshold: # target (A2) + break + + self._minor_collection() + self.major_collection_step(extrasize) + + self.rrc_invoke_callback() + + def collect_and_reserve(self, totalsize): """To call when nursery_free overflows nursery_top. First check if pinned objects are in front of nursery_top. If so, jump over the pinned object and try again to reserve totalsize. - Otherwise do a minor collection, and possibly a major collection, and - finally reserve totalsize bytes. + Otherwise do a minor collection, and possibly some steps of a + major collection, and finally reserve totalsize bytes. 
""" minor_collection_count = 0 @@ -757,47 +808,27 @@ self.nursery_top = self.nursery_barriers.popleft() else: minor_collection_count += 1 - self.minor_collection() if minor_collection_count == 1: + self.minor_collection_with_major_progress() + else: + # Nursery too full again. This is likely because of + # execute_finalizers() or rrc_invoke_callback(). + # we need to fix it with another call to minor_collection() + # ---this time only the minor part so that we are sure that + # the nursery is empty (apart from pinned objects). # - # If the gc_state is STATE_SCANNING, we're not in - # the middle of an incremental major collection. - # In that case, wait until there is too much - # garbage before starting the next major - # collection. But if we are in the middle of an - # incremental major collection, then always do (at - # least) one step now. + # Note that this still works with the counters: + # 'size_objects_made_old' will be increased by + # the _minor_collection() below. We don't + # immediately restore the target invariant that + # 'size_objects_made_old <= threshold_objects_made_old'. + # But we will do it in the next call to + # minor_collection_with_major_progress(). # - # This will increment next_major_collection_threshold - # by nursery_size//2. If more than nursery_size//2 - # survives, then threshold_reached() might still be - # true after that. In that case we do a second step. - # The goal is to avoid too high memory peaks if the - # program allocates a lot of surviving objects. - # - if (self.gc_state != STATE_SCANNING or - self.threshold_reached()): - - self.major_collection_step() - - if (self.gc_state != STATE_SCANNING and - self.threshold_reached()): # ^^but only if still - self.minor_collection() # the same collection - self.major_collection_step() - # - self.rrc_invoke_callback() - # - # The nursery might not be empty now, because of - # execute_finalizers() or rrc_invoke_callback(). 
- # If it is almost full again, - # we need to fix it with another call to minor_collection(). - if self.nursery_free + totalsize > self.nursery_top: - self.minor_collection() - # - else: ll_assert(minor_collection_count == 2, - "Seeing minor_collection() at least twice." - "Too many pinned objects?") + "Calling minor_collection() twice is not " + "enough. Too many pinned objects?") + self._minor_collection() # # Tried to do something about nursery_free overflowing # nursery_top before this point. Try to reserve totalsize now. @@ -855,21 +886,9 @@ # to major_collection_step(). If there is really no memory, # then when the major collection finishes it will raise # MemoryError. - # - # The logic is to first do a minor GC only, and check if that - # was enough to free a bunch of large young objects. If it - # was, then we don't do any major collection step. - # - while self.threshold_reached(raw_malloc_usage(totalsize)): - self.minor_collection() - if self.threshold_reached(raw_malloc_usage(totalsize) + - self.nursery_size // 2): - self.major_collection_step(raw_malloc_usage(totalsize)) - self.rrc_invoke_callback() - # note that this loop should not be infinite: when the - # last step of a major collection is done but - # threshold_reached(totalsize) is still true, then - # we should get a MemoryError from major_collection_step(). + if self.threshold_reached(raw_malloc_usage(totalsize)): + self.minor_collection_with_major_progress( + raw_malloc_usage(totalsize) + self.nursery_size // 2) # # Check if the object would fit in the ArenaCollection. # Also, an object allocated from ArenaCollection must be old. 
@@ -1547,7 +1566,7 @@ # ---------- # Nursery collection - def minor_collection(self): + def _minor_collection(self): """Perform a minor collection: find the objects from the nursery that remain alive and move them out.""" # @@ -1718,6 +1737,10 @@ self.old_objects_pointing_to_pinned.foreach( self._reset_flag_old_objects_pointing_to_pinned, None) # + # Accounting: 'nursery_surviving_size' is the size of objects + # from the nursery that we just moved out. + self.size_objects_made_old += r_uint(self.nursery_surviving_size) + # debug_print("minor collect, total memory used:", self.get_total_memory_used()) debug_print("number of pinned objects:", @@ -1958,6 +1981,7 @@ self.header(obj).tid &= ~GCFLAG_HAS_SHADOW # totalsize = size_gc_header + self.get_size(obj) + self.nursery_surviving_size += raw_malloc_usage(totalsize) # # Copy it. Note that references to other objects in the # nursery are kept unchanged in this step. @@ -2002,6 +2026,11 @@ return hdr.tid |= GCFLAG_VISITED_RMY # + # Accounting + size_gc_header = self.gcheaderbuilder.size_gc_header + size = size_gc_header + self.get_size(obj) + self.size_objects_made_old += r_uint(raw_malloc_usage(size)) + # # we just made 'obj' old, so we need to add it to the correct lists added_somewhere = False # @@ -2084,14 +2113,14 @@ def gc_step_until(self, state): while self.gc_state != state: - self.minor_collection() + self._minor_collection() self.major_collection_step() debug_gc_step_until = gc_step_until # xxx def debug_gc_step(self, n=1): while n > 0: - self.minor_collection() + self._minor_collection() self.major_collection_step() n -= 1 @@ -2111,37 +2140,44 @@ self.debug_check_consistency() # + # 'threshold_objects_made_old', is used inside comparisons + # with 'size_objects_made_old' to know when we must do + # several major GC steps (i.e. several consecurive calls + # to the present function). Here is the target that + # we try to aim to: either (A1) or (A2) + # + # (A1) gc_state == STATE_SCANNING (i.e. 
major GC cycle ended) + # (A2) size_objects_made_old <= threshold_objects_made_old + # # Every call to major_collection_step() adds nursery_size//2 - # to the threshold. It is reset at the end of this function - # when the major collection is fully finished. - # + # to 'threshold_objects_made_old'. # In the common case, this is larger than the size of all # objects that survive a minor collection. After a few # minor collections (each followed by one call to # major_collection_step()) the threshold is much higher than - # the currently-in-use old memory. Then threshold_reached() - # won't be true again until the major collection fully - # finishes, time passes, and it's time for the next major - # collection. + # the 'size_objects_made_old', making the target invariant (A2) + # true by a large margin. # # However there are less common cases: # - # * if more than half of the nursery consistently survives: we - # call major_collection_step() twice after a minor - # collection; + # * if more than half of the nursery consistently survives: + # then we need two calls to major_collection_step() after + # some minor collection; # # * or if we're allocating a large number of bytes in - # external_malloc(). In that case, we are likely to reach - # again the threshold_reached() case, and more major - # collection steps will be done immediately until - # threshold_reached() returns false. + # external_malloc() and some of them survive the following + # minor collection. In that case, more than two major + # collection steps must be done immediately, until we + # restore the target invariant (A2). 
# - self.next_major_collection_threshold += self.nursery_size // 2 + self.threshold_objects_made_old += r_uint(self.nursery_size // 2) - # XXX currently very coarse increments, get this working then split - # to smaller increments using stacks for resuming if self.gc_state == STATE_SCANNING: + # starting a major GC cycle: reset these two counters + self.size_objects_made_old = r_uint(0) + self.threshold_objects_made_old = r_uint(self.nursery_size // 2) + self.objects_to_trace = self.AddressStack() self.collect_roots() self.gc_state = STATE_MARKING diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -19,6 +19,8 @@ BaseDirectGCTest.setup_method(self, meth) max = getattr(meth, 'max_number_of_pinned_objects', 20) self.gc.max_number_of_pinned_objects = max + if not hasattr(self.gc, 'minor_collection'): + self.gc.minor_collection = self.gc._minor_collection def test_pin_can_move(self): # even a pinned object is considered to be movable. 
Only the caller From pypy.commits at gmail.com Sun Apr 24 06:17:50 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 24 Apr 2016 03:17:50 -0700 (PDT) Subject: [pypy-commit] pypy default: doc tweaks Message-ID: <571c9d4e.55301c0a.346bc.529e@mx.google.com> Author: Armin Rigo Branch: Changeset: r83838:d955f9d633d4 Date: 2016-04-24 10:55 +0200 http://bitbucket.org/pypy/pypy/changeset/d955f9d633d4/ Log: doc tweaks diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -746,13 +746,15 @@ if self.gc_state != STATE_SCANNING or self.threshold_reached(extrasize): self.major_collection_step(extrasize) - # See documentation in major_collection_step() + # See documentation in major_collection_step() for target invariants while self.gc_state != STATE_SCANNING: # target (A1) threshold = self.threshold_objects_made_old if threshold >= r_uint(extrasize): - threshold -= r_uint(extrasize) + threshold -= r_uint(extrasize) # (*) if self.size_objects_made_old <= threshold: # target (A2) break + # Note that target (A2) is tweaked by (*); see + # test_gc_set_max_heap_size in translator/c, test_newgc.py self._minor_collection() self.major_collection_step(extrasize) From pypy.commits at gmail.com Sun Apr 24 07:44:54 2016 From: pypy.commits at gmail.com (amauryfa) Date: Sun, 24 Apr 2016 04:44:54 -0700 (PDT) Subject: [pypy-commit] cffi default: Add my employer in AUTHORS Message-ID: <571cb1b6.4374c20a.a99d9.ffffd858@mx.google.com> Author: Amaury Forgeot d'Arc Branch: Changeset: r2682:daf21555983e Date: 2016-04-23 22:10 +0200 http://bitbucket.org/cffi/cffi/changeset/daf21555983e/ Log: Add my employer in AUTHORS diff --git a/AUTHORS b/AUTHORS --- a/AUTHORS +++ b/AUTHORS @@ -1,3 +1,8 @@ This package has been mostly done by Armin Rigo with help from Maciej Fijałkowski. The idea is heavily based (although not directly copied) from LuaJIT ffi by Mike Pall. 
+ + +Other contributors: + + Google Inc. From pypy.commits at gmail.com Sun Apr 24 07:44:50 2016 From: pypy.commits at gmail.com (amauryfa) Date: Sun, 24 Apr 2016 04:44:50 -0700 (PDT) Subject: [pypy-commit] cffi default: Implement backend.gcp() for the ctypes backend, and remove gc_weakref. Message-ID: <571cb1b2.50301c0a.b22c8.2b15@mx.google.com> Author: Amaury Forgeot d'Arc Branch: Changeset: r2680:c9b855645ce9 Date: 2016-04-23 10:02 +0200 http://bitbucket.org/cffi/cffi/changeset/c9b855645ce9/ Log: Implement backend.gcp() for the ctypes backend, and remove gc_weakref. diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -397,20 +397,7 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. """ - try: - gcp = self._backend.gcp - except AttributeError: - pass - else: - return gcp(cdata, destructor) - # - with self._lock: - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + return self._backend.gcp(cdata, destructor) def _get_cached_btype(self, type): assert self._lock.acquire(False) is False diff --git a/cffi/backend_ctypes.py b/cffi/backend_ctypes.py --- a/cffi/backend_ctypes.py +++ b/cffi/backend_ctypes.py @@ -993,6 +993,21 @@ assert onerror is None # XXX not implemented return BType(source, error) + def gcp(self, cdata, destructor): + BType = self.typeof(cdata) + try: + gcp_type = BType._gcp_type + except AttributeError: + class CTypesDataGcp(BType): + __slots__ = ['_orig', '_destructor'] + def __del__(self): + self._destructor(self._orig) + gcp_type = BType._gcp_type = CTypesDataGcp + new_cdata = self.cast(gcp_type, cdata) + new_cdata._orig = cdata + new_cdata._destructor = destructor + return new_cdata + typeof = type def getcname(self, BType, replace_with): diff --git a/cffi/gc_weakref.py b/cffi/gc_weakref.py deleted file mode 100644 
--- a/cffi/gc_weakref.py +++ /dev/null @@ -1,22 +0,0 @@ -from weakref import ref - - -class GcWeakrefs(object): - def __init__(self, ffi): - self.ffi = ffi - self.data = {} - - def build(self, cdata, destructor): - # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) - # - def remove(key): - # careful, this function is not protected by any lock - old_key = self.data.pop(index) - assert old_key is key - destructor(cdata) - # - key = ref(new_cdata, remove) - index = object() - self.data[index] = key - return new_cdata diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py --- a/testing/cffi0/backend_tests.py +++ b/testing/cffi0/backend_tests.py @@ -1524,19 +1524,14 @@ def test_gc_finite_list(self): ffi = FFI(backend=self.Backend()) - public = not hasattr(ffi._backend, 'gcp') p = ffi.new("int *", 123) keepalive = [] for i in range(10): keepalive.append(ffi.gc(p, lambda p: None)) - if public: - assert len(ffi.gc_weakrefs.data) == i + 1 del keepalive[:] import gc; gc.collect(); gc.collect() for i in range(10): keepalive.append(ffi.gc(p, lambda p: None)) - if public: - assert len(ffi.gc_weakrefs.data) == 10 def test_CData_CType(self): ffi = FFI(backend=self.Backend()) From pypy.commits at gmail.com Sun Apr 24 07:44:52 2016 From: pypy.commits at gmail.com (amauryfa) Date: Sun, 24 Apr 2016 04:44:52 -0700 (PDT) Subject: [pypy-commit] cffi default: Add ffi.gc(ptr, None) which *removes* the destructor in-place on a ffi.gc() object. Message-ID: <571cb1b4.10691c0a.10606.32e5@mx.google.com> Author: Amaury Forgeot d'Arc Branch: Changeset: r2681:4d19ce180883 Date: 2016-04-23 11:02 +0200 http://bitbucket.org/cffi/cffi/changeset/4d19ce180883/ Log: Add ffi.gc(ptr, None) which *removes* the destructor in-place on a ffi.gc() object. 
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -6065,6 +6065,17 @@ &CData_Type, &origobj, &destructor)) return NULL; + if (destructor == Py_None) { + if (!PyObject_TypeCheck(origobj, &CDataGCP_Type)) { + PyErr_SetString(PyExc_TypeError, + "Can remove destructor only on a object " + "previously returned by ffi.gc()"); + return NULL; + } + Py_CLEAR(((CDataObject_gcp *)origobj)->destructor); + Py_RETURN_NONE; + } + cd = allocate_gcp_object(origobj, origobj->c_type, destructor); return (PyObject *)cd; } diff --git a/cffi/backend_ctypes.py b/cffi/backend_ctypes.py --- a/cffi/backend_ctypes.py +++ b/cffi/backend_ctypes.py @@ -995,13 +995,23 @@ def gcp(self, cdata, destructor): BType = self.typeof(cdata) + + if destructor is None: + if not (hasattr(BType, '_gcp_type') and + BType._gcp_type is BType): + raise TypeError("Can remove destructor only on a object " + "previously returned by ffi.gc()") + cdata._destructor = None + return None + try: gcp_type = BType._gcp_type except AttributeError: class CTypesDataGcp(BType): __slots__ = ['_orig', '_destructor'] def __del__(self): - self._destructor(self._orig) + if self._destructor is not None: + self._destructor(self._orig) gcp_type = BType._gcp_type = CTypesDataGcp new_cdata = self.cast(gcp_type, cdata) new_cdata._orig = cdata diff --git a/doc/source/ref.rst b/doc/source/ref.rst --- a/doc/source/ref.rst +++ b/doc/source/ref.rst @@ -318,6 +318,11 @@ which means the destructor is called as soon as *this* exact returned object is garbage-collected. +**ffi.gc(ptr, None)**: removes the ownership on a object returned by a +regular call to ``ffi.gc``, and no destructor will be called when it +is garbage-collected. The object is modified in-place, and the +function returns ``None``. + Note that this should be avoided for large memory allocations or for limited resources. 
This is particularly true on PyPy: its GC does not know how much memory or how many resources the returned ``ptr`` diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -3,6 +3,13 @@ ====================== +v1.next +======= + +* ``ffi.gc(p, None)`` removes the destructor on an object previously + created by another call to ``ffi.gc()`` + + v1.6 ==== diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py --- a/testing/cffi0/backend_tests.py +++ b/testing/cffi0/backend_tests.py @@ -1522,6 +1522,20 @@ import gc; gc.collect(); gc.collect(); gc.collect() assert seen == [3] + def test_gc_disable(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("int *", 123) + py.test.raises(TypeError, ffi.gc, p, None) + seen = [] + q1 = ffi.gc(p, lambda p: seen.append(1)) + q2 = ffi.gc(q1, lambda p: seen.append(2)) + import gc; gc.collect() + assert seen == [] + assert ffi.gc(q1, None) is None + del q1, q2 + import gc; gc.collect(); gc.collect(); gc.collect() + assert seen == [2] + def test_gc_finite_list(self): ffi = FFI(backend=self.Backend()) p = ffi.new("int *", 123) From pypy.commits at gmail.com Sun Apr 24 07:45:26 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 24 Apr 2016 04:45:26 -0700 (PDT) Subject: [pypy-commit] cffi default: Clean up Message-ID: <571cb1d6.891d1c0a.39a2b.2cfb@mx.google.com> Author: Armin Rigo Branch: Changeset: r2683:22ee329dc175 Date: 2016-04-23 10:30 +0200 http://bitbucket.org/cffi/cffi/changeset/22ee329dc175/ Log: Clean up diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -321,19 +321,20 @@ assert lib.strlen("hello") == 5 You can also pass unicode strings as ``wchar_t *`` arguments. Note that -in general, there is no difference between C argument declarations that +the C language makes no difference between argument declarations that use ``type *`` or ``type[]``. 
For example, ``int *`` is fully -equivalent to ``int[]`` (or even ``int[5]``; the 5 is ignored). So you -can pass an ``int *`` as a list of integers: +equivalent to ``int[]`` (or even ``int[5]``; the 5 is ignored). For CFFI, +this means that you can always pass arguments that can be converted to +either ``int *`` or ``int[]``. For example: .. code-block:: python # void do_something_with_array(int *array); - lib.do_something_with_array([1, 2, 3, 4, 5]) + lib.do_something_with_array([1, 2, 3, 4, 5]) # works for int[] See `Reference: conversions`__ for a similar way to pass ``struct foo_s -*`` arguments---but in general, it is clearer to simply pass +*`` arguments---but in general, it is clearer in this case to pass ``ffi.new('struct foo_s *', initializer)``. __ ref.html#conversions From pypy.commits at gmail.com Sun Apr 24 07:45:28 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 24 Apr 2016 04:45:28 -0700 (PDT) Subject: [pypy-commit] cffi default: Lies, this didn't print a list Message-ID: <571cb1d8.022ec20a.6b68f.ffffcee4@mx.google.com> Author: Armin Rigo Branch: Changeset: r2684:684294ee51f8 Date: 2016-04-23 10:39 +0200 http://bitbucket.org/cffi/cffi/changeset/684294ee51f8/ Log: Lies, this didn't print a list diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -834,7 +834,7 @@ @ffi.callback("int(int, int *)") def python_callback(how_many, values): - print values # a list + print ffi.unpack(values, how_many) return 0 lib.python_callback = python_callback From pypy.commits at gmail.com Sun Apr 24 07:45:29 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 24 Apr 2016 04:45:29 -0700 (PDT) Subject: [pypy-commit] cffi default: merge heads Message-ID: <571cb1d9.a2f2c20a.dfd0b.ffffcdc6@mx.google.com> Author: Armin Rigo Branch: Changeset: r2685:381f8b0c2ee0 Date: 2016-04-24 13:46 +0200 http://bitbucket.org/cffi/cffi/changeset/381f8b0c2ee0/ Log: merge heads diff --git a/AUTHORS b/AUTHORS --- 
a/AUTHORS +++ b/AUTHORS @@ -1,3 +1,8 @@ This package has been mostly done by Armin Rigo with help from Maciej Fijałkowski. The idea is heavily based (although not directly copied) from LuaJIT ffi by Mike Pall. + + +Other contributors: + + Google Inc. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -6065,6 +6065,17 @@ &CData_Type, &origobj, &destructor)) return NULL; + if (destructor == Py_None) { + if (!PyObject_TypeCheck(origobj, &CDataGCP_Type)) { + PyErr_SetString(PyExc_TypeError, + "Can remove destructor only on a object " + "previously returned by ffi.gc()"); + return NULL; + } + Py_CLEAR(((CDataObject_gcp *)origobj)->destructor); + Py_RETURN_NONE; + } + cd = allocate_gcp_object(origobj, origobj->c_type, destructor); return (PyObject *)cd; } diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -397,20 +397,7 @@ data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. 
""" - try: - gcp = self._backend.gcp - except AttributeError: - pass - else: - return gcp(cdata, destructor) - # - with self._lock: - try: - gc_weakrefs = self.gc_weakrefs - except AttributeError: - from .gc_weakref import GcWeakrefs - gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self) - return gc_weakrefs.build(cdata, destructor) + return self._backend.gcp(cdata, destructor) def _get_cached_btype(self, type): assert self._lock.acquire(False) is False diff --git a/cffi/backend_ctypes.py b/cffi/backend_ctypes.py --- a/cffi/backend_ctypes.py +++ b/cffi/backend_ctypes.py @@ -993,6 +993,31 @@ assert onerror is None # XXX not implemented return BType(source, error) + def gcp(self, cdata, destructor): + BType = self.typeof(cdata) + + if destructor is None: + if not (hasattr(BType, '_gcp_type') and + BType._gcp_type is BType): + raise TypeError("Can remove destructor only on a object " + "previously returned by ffi.gc()") + cdata._destructor = None + return None + + try: + gcp_type = BType._gcp_type + except AttributeError: + class CTypesDataGcp(BType): + __slots__ = ['_orig', '_destructor'] + def __del__(self): + if self._destructor is not None: + self._destructor(self._orig) + gcp_type = BType._gcp_type = CTypesDataGcp + new_cdata = self.cast(gcp_type, cdata) + new_cdata._orig = cdata + new_cdata._destructor = destructor + return new_cdata + typeof = type def getcname(self, BType, replace_with): diff --git a/cffi/gc_weakref.py b/cffi/gc_weakref.py deleted file mode 100644 --- a/cffi/gc_weakref.py +++ /dev/null @@ -1,22 +0,0 @@ -from weakref import ref - - -class GcWeakrefs(object): - def __init__(self, ffi): - self.ffi = ffi - self.data = {} - - def build(self, cdata, destructor): - # make a new cdata of the same type as the original one - new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) - # - def remove(key): - # careful, this function is not protected by any lock - old_key = self.data.pop(index) - assert old_key is key - destructor(cdata) - # - key = 
ref(new_cdata, remove) - index = object() - self.data[index] = key - return new_cdata diff --git a/doc/source/ref.rst b/doc/source/ref.rst --- a/doc/source/ref.rst +++ b/doc/source/ref.rst @@ -318,6 +318,11 @@ which means the destructor is called as soon as *this* exact returned object is garbage-collected. +**ffi.gc(ptr, None)**: removes the ownership on a object returned by a +regular call to ``ffi.gc``, and no destructor will be called when it +is garbage-collected. The object is modified in-place, and the +function returns ``None``. + Note that this should be avoided for large memory allocations or for limited resources. This is particularly true on PyPy: its GC does not know how much memory or how many resources the returned ``ptr`` diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -3,6 +3,13 @@ ====================== +v1.next +======= + +* ``ffi.gc(p, None)`` removes the destructor on an object previously + created by another call to ``ffi.gc()`` + + v1.6 ==== diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py --- a/testing/cffi0/backend_tests.py +++ b/testing/cffi0/backend_tests.py @@ -1522,21 +1522,30 @@ import gc; gc.collect(); gc.collect(); gc.collect() assert seen == [3] + def test_gc_disable(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("int *", 123) + py.test.raises(TypeError, ffi.gc, p, None) + seen = [] + q1 = ffi.gc(p, lambda p: seen.append(1)) + q2 = ffi.gc(q1, lambda p: seen.append(2)) + import gc; gc.collect() + assert seen == [] + assert ffi.gc(q1, None) is None + del q1, q2 + import gc; gc.collect(); gc.collect(); gc.collect() + assert seen == [2] + def test_gc_finite_list(self): ffi = FFI(backend=self.Backend()) - public = not hasattr(ffi._backend, 'gcp') p = ffi.new("int *", 123) keepalive = [] for i in range(10): keepalive.append(ffi.gc(p, lambda p: None)) - if public: - assert len(ffi.gc_weakrefs.data) == i + 1 del keepalive[:] 
import gc; gc.collect(); gc.collect() for i in range(10): keepalive.append(ffi.gc(p, lambda p: None)) - if public: - assert len(ffi.gc_weakrefs.data) == 10 def test_CData_CType(self): ffi = FFI(backend=self.Backend()) From pypy.commits at gmail.com Sun Apr 24 08:28:42 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 24 Apr 2016 05:28:42 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <571cbbfa.865a1c0a.c47b8.348b@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r736:e477a6aa1f52 Date: 2016-04-24 14:29 +0200 http://bitbucket.org/pypy/pypy.org/changeset/e477a6aa1f52/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $63767 of $105000 (60.7%) + $63833 of $105000 (60.8%)
    @@ -23,7 +23,7 @@
  • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $30670 of $80000 (38.3%) + $30681 of $80000 (38.4%)
    @@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Sun Apr 24 10:14:09 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 24 Apr 2016 07:14:09 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: various fixes for translation and tests Message-ID: <571cd4b1.0b1f1c0a.963b3.ffff8ca9@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83840:cfcb4f157e44 Date: 2016-04-24 17:13 +0300 http://bitbucket.org/pypy/pypy/changeset/cfcb4f157e44/ Log: various fixes for translation and tests diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -163,6 +163,7 @@ user_overridden_class = True for base in mixins_needed: objectmodel.import_from_mixin(base) + del subcls.base subcls.__name__ = name return subcls diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -1,8 +1,7 @@ import py class AppTest(object): - spaceconfig = {"objspace.usemodules.select": False, - "objspace.std.withrangelist": True} + spaceconfig = {"objspace.usemodules.select": False} def setup_class(cls): if cls.runappdirect: diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -579,15 +579,15 @@ @objectmodel.dont_inline def _obj_setdict(self, space, w_dict): - from pypy.objspace.std import dictmultiobject + from pypy.interpreter.error import OperationError terminator = self._get_mapdict_map().terminator assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator) if not space.isinstance_w(w_dict, space.w_dict): raise OperationError(space.w_TypeError, space.wrap("setting dictionary to a non-dict")) - assert isinstance(w_dict, dictmultiobject.W_DictMultiObject) + assert isinstance(w_dict, W_DictMultiObject) w_olddict = 
self.getdict(space) - assert isinstance(w_dict, W_DictMultiObject) + assert isinstance(w_olddict, W_DictMultiObject) # The old dict has got 'self' as dstorage, but we are about to # change self's ("dict", SPECIAL) attribute to point to the # new dict. If the old dict was using the MapDictStrategy, we @@ -617,11 +617,22 @@ self.storage = storage self.map = map -class ObjectWithoutDict(MapdictStorageMixin, BaseUserClassMapdict, MapdictWeakrefSupport, W_Root): - pass # mainly for tests +class ObjectWithoutDict(W_Root): + # mainly for tests + objectmodel.import_from_mixin(MapdictStorageMixin) -class Object(MapdictStorageMixin, BaseUserClassMapdict, MapdictDictSupport, MapdictWeakrefSupport, W_Root): - pass # mainly for tests + objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictWeakrefSupport) + + +class Object(W_Root): + # mainly for tests + objectmodel.import_from_mixin(MapdictStorageMixin) + + objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictWeakrefSupport) + objectmodel.import_from_mixin(MapdictDictSupport) + SUBCLASSES_NUM_FIELDS = 5 diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -599,15 +599,20 @@ def test_specialized_class(): + from pypy.objspace.std.mapdict import _make_storage_mixin_size_n from pypy.objspace.std.objectobject import W_ObjectObject - classes = memo_get_subclass_of_correct_size(space, W_ObjectObject) + classes = [_make_storage_mixin_size_n(i) for i in range(2, 10)] w1 = W_Root() w2 = W_Root() w3 = W_Root() w4 = W_Root() w5 = W_Root() w6 = W_Root() - for objectcls in classes: + for mixin in classes: + class objectcls(W_ObjectObject): + objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictDictSupport) + objectmodel.import_from_mixin(mixin) cls = Class() obj = objectcls() obj.user_setup(space, 
cls) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -535,7 +535,7 @@ def add_subclass(w_self, w_subclass): space = w_self.space if not space.config.translation.rweakref: - self.weak_subclasses.append(w_subclass) # not really weak, but well + w_self.weak_subclasses.append(w_subclass) # not really weak, but well return import weakref assert isinstance(w_subclass, W_TypeObject) @@ -566,7 +566,7 @@ def get_subclasses(w_self): space = w_self.space if not space.config.translation.rweakref: - return self.weak_subclasses[:] + return w_self.weak_subclasses[:] subclasses_w = [] for ref in w_self.weak_subclasses: w_ob = ref() From pypy.commits at gmail.com Sun Apr 24 10:14:07 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 24 Apr 2016 07:14:07 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: the assert was too strong Message-ID: <571cd4af.d5da1c0a.89c17.5c7a@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83839:fc81ea0932ec Date: 2016-04-24 14:24 +0300 http://bitbucket.org/pypy/pypy/changeset/fc81ea0932ec/ Log: the assert was too strong diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -563,7 +563,8 @@ @objectmodel.dont_inline def _obj_getdict(self, space): - assert isinstance(self._get_mapdict_map().terminator, DictTerminator) + terminator = self._get_mapdict_map().terminator + assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator) w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) if w_dict is not None: assert isinstance(w_dict, W_DictMultiObject) @@ -579,7 +580,8 @@ @objectmodel.dont_inline def _obj_setdict(self, space, w_dict): from pypy.objspace.std import dictmultiobject - assert isinstance(self._get_mapdict_map().terminator, DictTerminator) + terminator 
= self._get_mapdict_map().terminator + assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator) if not space.isinstance_w(w_dict, space.w_dict): raise OperationError(space.w_TypeError, space.wrap("setting dictionary to a non-dict")) From pypy.commits at gmail.com Sun Apr 24 11:52:53 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 24 Apr 2016 08:52:53 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: another withmethodcache Message-ID: <571cebd5.2457c20a.ef1d2.2cc6@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83841:6900e24d4b76 Date: 2016-04-24 18:52 +0300 http://bitbucket.org/pypy/pypy/changeset/6900e24d4b76/ Log: another withmethodcache diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,7 +106,6 @@ """Base class for all cpyext tests.""" spaceconfig = dict(usemodules=['cpyext', 'thread', '_rawffi', 'array', 'itertools', 'time', 'binascii', 'micronumpy']) - spaceconfig['std.withmethodcache'] = True enable_leak_checking = True From pypy.commits at gmail.com Sun Apr 24 11:59:03 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 24 Apr 2016 08:59:03 -0700 (PDT) Subject: [pypy-commit] pypy gcheader-decl: document branch Message-ID: <571ced47.a1ccc20a.cadb8.4681@mx.google.com> Author: Ronan Lamy Branch: gcheader-decl Changeset: r83842:84b003d05fec Date: 2016-04-24 16:58 +0100 http://bitbucket.org/pypy/pypy/changeset/84b003d05fec/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -6,3 +6,7 @@ .. startrev: aa60332382a1 .. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046 + +.. branch: gcheader-decl + +Reduce the size of generated C sources. 
From pypy.commits at gmail.com Sun Apr 24 12:01:11 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 24 Apr 2016 09:01:11 -0700 (PDT) Subject: [pypy-commit] pypy gcheader-decl: Close branch gcheader-decl Message-ID: <571cedc7.0308c20a.d0571.2420@mx.google.com> Author: Ronan Lamy Branch: gcheader-decl Changeset: r83843:ba896fd0d3f5 Date: 2016-04-24 17:00 +0100 http://bitbucket.org/pypy/pypy/changeset/ba896fd0d3f5/ Log: Close branch gcheader-decl From pypy.commits at gmail.com Sun Apr 24 12:01:39 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 24 Apr 2016 09:01:39 -0700 (PDT) Subject: [pypy-commit] pypy default: Merged in gcheader-decl (pull request #432) Message-ID: <571cede3.e779c20a.5dd80.21b8@mx.google.com> Author: Ronan Lamy Branch: Changeset: r83844:054e2f4e2e53 Date: 2016-04-24 17:00 +0100 http://bitbucket.org/pypy/pypy/changeset/054e2f4e2e53/ Log: Merged in gcheader-decl (pull request #432) Reduce size of the generated C sources fixes issue #2281 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -6,3 +6,7 @@ .. startrev: aa60332382a1 .. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046 + +.. branch: gcheader-decl + +Reduce the size of generated C sources. 
diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -547,7 +547,6 @@ gct = self.db.gctransformer if gct is not None: self.gc_init = gct.gcheader_initdata(self.obj) - db.getcontainernode(self.gc_init) else: self.gc_init = None @@ -678,7 +677,6 @@ gct = self.db.gctransformer if gct is not None: self.gc_init = gct.gcheader_initdata(self.obj) - db.getcontainernode(self.gc_init) else: self.gc_init = None From pypy.commits at gmail.com Sun Apr 24 13:38:36 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 24 Apr 2016 10:38:36 -0700 (PDT) Subject: [pypy-commit] pypy default: typo (thanks LarstiQ) Message-ID: <571d049c.432f1c0a.c1bb4.ffffa92d@mx.google.com> Author: Armin Rigo Branch: Changeset: r83845:35301c9c4051 Date: 2016-04-24 19:38 +0200 http://bitbucket.org/pypy/pypy/changeset/35301c9c4051/ Log: typo (thanks LarstiQ) diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -726,7 +726,7 @@ def minor_collection_with_major_progress(self, extrasize=0): - """To a minor collection. Then, if there is already a major GC + """Do a minor collection. Then, if there is already a major GC in progress, run at least one major collection step. If there is no major GC but the threshold is reached, start a major GC. 
""" From pypy.commits at gmail.com Mon Apr 25 02:40:21 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 24 Apr 2016 23:40:21 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: moved .../metainterp/jitlog.py to rlib (since the vm implementer uses the jitlog directly) Message-ID: <571dbbd5.c4efc20a.9e9e3.0d0c@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83846:c55eaccdb3a6 Date: 2016-04-21 13:56 +0200 http://bitbucket.org/pypy/pypy/changeset/c55eaccdb3a6/ Log: moved .../metainterp/jitlog.py to rlib (since the vm implementer uses the jitlog directly) implemented compression (common prefix) for strings that are encoded in the debug merge point diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -6,7 +6,7 @@ from rpython.rlib.debug import debug_start, debug_stop, debug_print from rpython.rlib.debug import have_debug_prints_for from rpython.rlib.jit import PARAMETERS -from rpython.rlib import jitlog +from rpython.rlib import jitlog as jl from rpython.rlib.nonconst import NonConstant from rpython.rlib.objectmodel import specialize, we_are_translated, r_dict from rpython.rlib.rarithmetic import intmask, r_uint @@ -684,24 +684,25 @@ if get_location_ptr is not None: types = self.jitdriver_sd._get_loc_types unwrap_greenkey = self.make_unwrap_greenkey() - unrolled_types = unrolling_iterable(enumerate(types)) + unrolled_types = unrolling_iterable(types) def get_location(greenkey): greenargs = unwrap_greenkey(greenkey) fn = support.maybe_on_top_of_llinterp(rtyper, get_location_ptr) - tuple_ptr = fn(*greenargs) - # - flag = intmask(tuple_ptr.item0) - value_tuple = tuple_ptr.item1 - ntuple = () - for i,(_,t) in unrolled_types: - if t == "s": - ntuple += (hlstr(getattr(value_tuple, 'item' + str(i))),) - elif t == "i": - ntuple += (intmask(getattr(value_tuple, 'item' + str(i))),) + value_tuple = fn(*greenargs) + values = [] 
+ i = 0 + for sem_type,gen_type in unrolled_types: + if gen_type == "s": + value = getattr(value_tuple, 'item' + str(i)) + values.append(jl.wrap(sem_type,gen_type,hlstr(value))) + elif gen_type == "i": + value = getattr(value_tuple, 'item' + str(i)) + values.append(jl.wrap(sem_type,gen_type,intmask(value))) else: raise NotImplementedError - return flag, ntuple - self.get_location_types = types + i += 1 + return values + self.get_location_types = list(types) self.get_location = get_location else: self.get_location_types = None diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py --- a/rpython/rlib/jitlog.py +++ b/rpython/rlib/jitlog.py @@ -1,5 +1,75 @@ +import sys +import weakref +import struct +import os -# generic parameters +from rpython.rlib.rvmprof import cintf +from rpython.jit.metainterp import resoperation as resoperations +from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.history import ConstInt, ConstFloat +from rpython.rlib.objectmodel import we_are_translated +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rlib.objectmodel import compute_unique_id, always_inline +from rpython.rlib.objectmodel import we_are_translated, specialize +from rpython.rlib.unroll import unrolling_iterable + +def commonprefix(m): + "Given a list of pathnames, returns the longest common leading component" + if not m: return '' + s1 = min(m) + s2 = max(m) + for i, c in enumerate(s1): + if c != s2[i]: + return s1[:i] + return s1 + + at always_inline +def encode_str(string): + return encode_le_32bit(len(string)) + string + + at always_inline +def encode_le_16bit(val): + return chr((val >> 0) & 0xff) + chr((val >> 8) & 0xff) + + at always_inline +def encode_le_32bit(val): + return ''.join([chr((val >> 0) & 0xff), + chr((val >> 8) & 0xff), + chr((val >> 16) & 0xff), + chr((val >> 24) & 0xff)]) + + at always_inline +def encode_le_64bit(val): + return ''.join([chr((val >> 0) & 0xff), + chr((val >> 8) & 0xff), + chr((val 
>> 16) & 0xff), + chr((val >> 24) & 0xff), + chr((val >> 32) & 0xff), + chr((val >> 40) & 0xff), + chr((val >> 48) & 0xff), + chr((val >> 56)& 0xff)]) + + at always_inline +def encode_le_addr(val): + if IS_32_BIT: + return encode_le_32bit(val) + else: + return encode_le_64bit(val) + +def encode_type(type, value): + if type == "s": + return encode_str(value) + elif type == "q": + return encode_le_64bit(value) + elif type == "i": + return encode_le_32bit(value) + elif type == "h": + return encode_le_32bit(value) + else: + raise NotImplementedError + + +# more variable parameters MP_STR = (0x0, "s") MP_INT = (0x0, "i") @@ -7,11 +77,62 @@ MP_FILENAME = (0x1, "s") MP_LINENO = (0x2, "i") MP_INDEX = (0x4, "i") -MP_ENCLOSING = (0x8, "s") +MP_SCOPE = (0x8, "s") MP_OPCODE = (0x10, "s") -def get_type(flag): - pass +class WrappedValue(object): + def encode(self, i, prefixes): + raise NotImplementedError + +class StringValue(WrappedValue): + def __init__(self, sem_type, gen_type, value): + self.value = value + + def encode(self, log, i, prefixes): + str_value = self.value + if len(str_value) < 5: + enc_value = encode_str(chr(0xff) + str_value) + else: + cp = commonprefix([prefixes[i], str_value]) + if cp != prefixes[i]: + if len(cp) == 0: + # they are fully different! + prefixes[i] = str_value + enc_value = encode_str(chr(0xff) + str_value) + else: + # the prefix changed + prefixes[i] = cp + # common prefix of field i + assert i != 0xff + log._write_marked(MARK_COMMON_PREFIX, chr(i) \ + + encode_str(cp)) + enc_value = encode_str(chr(i) + str_value) + else: + enc_value = encode_str(chr(i) + str_value) + # + if prefixes[i] is None: + prefixes[i] = str_value + return enc_value + +class IntValue(WrappedValue): + def __init__(self, sem_type, gen_type, value): + self.value = value + + def encode(self, log, i, prefixes): + return encode_le_64bit(self.value) + +# note that a ... 
+# "semantic_type" is an integer denoting which meaning does a type at a merge point have +# there are very common ones that are predefined. E.g. MP_FILENAME +# "generic_type" is one of the primitive types supported (string,int) + + at specialize.argtype(2) +def wrap(sem_type, gen_type, value): + if isinstance(value, int): + return IntValue(sem_type, gen_type, value) + elif isinstance(value, str): + return StringValue(sem_type, gen_type, value) + raise NotImplementedError def returns(*args): """ Decorate your get_location function to specify the types. @@ -26,3 +147,305 @@ method._loc_types = args return method return decor + +JITLOG_VERSION = 1 +JITLOG_VERSION_16BIT_LE = struct.pack(" as two unsigend longs + le_addr1 = encode_le_addr(absaddr) + le_addr2 = encode_le_addr(absaddr + rel) + log._write_marked(MARK_ASM_ADDR, le_addr1 + le_addr2) + for i,op in enumerate(ops): + if rop.DEBUG_MERGE_POINT == op.getopnum(): + self.encode_debug_info(op) + continue + mark, line = self.encode_op(op) + log._write_marked(mark, line) + self.write_core_dump(ops, i, op, ops_offset) + + self.memo = {} + + def encode_once(self): + pass + + def encode_debug_info(self, op): + # the idea is to write the debug merge point as it's own well known + # tag. 
Compression for common prefixes is implemented: + + log = self.logger + jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] + if not jd_sd.warmstate.get_location: + return + values = jd_sd.warmstate.get_location(op.getarglist()[3:]) + if values is None: + # indicates that this function is not provided to the jit driver + return + types = jd_sd.warmstate.get_location_types + + if self.common_prefix is None: + # first time visiting a merge point + # setup the common prefix + self.common_prefix = [""] * len(types) + encoded_types = [] + for i, (semantic_type, _) in enumerate(types): + encoded_types.append(chr(semantic_type)) + log._write_marked(MARK_INIT_MERGE_POINT, ''.join(encoded_types)) + + # the types have already been written + encoded = encode_merge_point(log, self.common_prefix, values) + log._write_marked(MARK_JITLOG_DEBUG_MERGE_POINT, encoded) + + def encode_op(self, op): + """ an operation is written as follows: + \ + \ + ,,...,, + The marker indicates if the last argument is + a descr or a normal argument. 
+ """ + str_args = [self.var_to_str(arg) for arg in op.getarglist()] + descr = op.getdescr() + le_opnum = encode_le_16bit(op.getopnum()) + str_res = self.var_to_str(op) + line = ','.join([str_res] + str_args) + if descr: + descr_str = descr.repr_of_descr() + line = line + ',' + descr_str + string = encode_str(line) + descr_number = compute_unique_id(descr) + le_descr_number = encode_le_addr(descr_number) + return MARK_RESOP_DESCR, le_opnum + string + le_descr_number + else: + string = encode_str(line) + return MARK_RESOP, le_opnum + string + + + def write_core_dump(self, operations, i, op, ops_offset): + if self.mc is None: + return + + op2 = None + j = i+1 + # find the next op that is in the offset hash + while j < len(operations): + op2 = operations[j] + if op in ops_offset: + break + j += 1 + + # this op has no known offset in the machine code (it might be + # a debug operation) + if op not in ops_offset: + return + # there is no well defined boundary for the end of the + # next op in the assembler + if op2 is not None and op2 not in ops_offset: + return + dump = [] + + start_offset = ops_offset[op] + assert start_offset >= 0 + # end offset is either the last pos in the assembler + # or the offset of op2 + if op2 is None: + end_offset = self.mc.get_relative_pos() + else: + end_offset = ops_offset[op2] + + count = end_offset - start_offset + dump = self.mc.copy_core_dump(self.mc.absolute_addr(), start_offset, count) + offset = encode_le_16bit(start_offset) + edump = encode_str(dump) + self.logger._write_marked(MARK_ASM, offset + edump) + + def var_to_str(self, arg): + try: + mv = self.memo[arg] + except KeyError: + mv = len(self.memo) + self.memo[arg] = mv + if isinstance(arg, ConstInt): + if self.metainterp_sd and int_could_be_an_address(arg.value): + addr = arg.getaddr() + name = self.metainterp_sd.get_name_from_address(addr) + if name: + return 'ConstClass(' + name + ')' + return str(arg.value) + elif self.ts is not None and isinstance(arg, self.ts.ConstRef): 
+ if arg.value: + return 'ConstPtr(ptr' + str(mv) + ')' + return 'ConstPtr(null)' + if isinstance(arg, ConstFloat): + return str(arg.getfloat()) + elif arg is None: + return 'None' + elif arg.is_vector(): + return 'v' + str(mv) + elif arg.type == 'i': + return 'i' + str(mv) + elif arg.type == 'r': + return 'p' + str(mv) + elif arg.type == 'f': + return 'f' + str(mv) + else: + return '?' + +def int_could_be_an_address(x): + if we_are_translated(): + x = rffi.cast(lltype.Signed, x) # force it + return not (-32768 <= x <= 32767) + else: + return isinstance(x, llmemory.AddressAsInt) diff --git a/rpython/rlib/test/test_jitlog.py b/rpython/rlib/test/test_jitlog.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/test_jitlog.py @@ -0,0 +1,56 @@ +from rpython.jit.tool.oparser import pure_parse +from rpython.jit.metainterp.optimizeopt.util import equaloplists +from rpython.jit.metainterp.resoperation import ResOperation, rop +from rpython.jit.backend.model import AbstractCPU +from rpython.jit.metainterp.history import ConstInt, ConstPtr +from rpython.rlib.jitlog import (encode_str, encode_le_16bit, encode_le_64bit) +from rpython.rlib import jitlog as jl +import tempfile + +class TestLogger(object): + + def make_metainterp_sd(self): + class FakeJitDriver(object): + class warmstate(object): + @staticmethod + def get_location(greenkey_list): + assert len(greenkey_list) == 0 + return '/home/pypy/jit.py', 0, 'enclosed', 99, 'DEL' + get_location_types = [(jl.MP_FILENAME,'s'),(0x0,'i'),(jl.MP_SCOPE,'s'), (0x0,'i'), (jl.MP_OPCODE, 's')] + + class FakeMetaInterpSd: + cpu = AbstractCPU() + cpu.ts = None + jitdrivers_sd = [FakeJitDriver()] + def get_name_from_address(self, addr): + return 'Name' + return FakeMetaInterpSd() + + def test_debug_merge_point(self, tmpdir): + logger = jitlog.VMProfJitLogger() + file = tmpdir.join('binary_file') + file.ensure() + fd = file.open('wb') + logger.cintf.jitlog_init(fd.fileno()) + log_trace = logger.log_trace(0, 
self.make_metainterp_sd(), None) + op = ResOperation(rop.DEBUG_MERGE_POINT, [ConstInt(0), ConstInt(0), ConstInt(0)]) + log_trace.write([], [op]) + #the next line will close 'fd' + fd.close() + logger.finish() + binary = file.read() + assert binary.startswith(b'\x00\x04\x00\x00\x00loop') + assert binary.endswith(b'\x24' + \ + encode_str('/home/pypy/jit.py') + \ + encode_le_16bit(0) + \ + encode_str('enclosed') + \ + encode_le_64bit(99) + \ + encode_str('DEL')) + + class FakeLog(object): + def _write_marked(self, id, text): + pass + + def test_common_prefix(self): + fakelog = FakeLog() + logger = jitlog.LogTrace(0x0, {}, None, None, fakelog) From pypy.commits at gmail.com Mon Apr 25 02:40:23 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 24 Apr 2016 23:40:23 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: hurrayy! the new get_location function API is now extensible and pypy translates (this took me quite a while) Message-ID: <571dbbd7.2450c20a.74c2.52c5@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83847:a02168e4834f Date: 2016-04-25 08:38 +0200 http://bitbucket.org/pypy/pypy/changeset/a02168e4834f/ Log: hurrayy! 
the new get_location function API is now extensible and pypy translates (this took me quite a while) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -10,7 +10,7 @@ from rpython.jit.metainterp.history import (Const, VOID, ConstInt) from rpython.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT from rpython.jit.metainterp.compile import ResumeGuardDescr -from rpython.jit.metainterp.jitlog import MARK_TRACE_ASM +from rpython.rlib.jitlog import MARK_TRACE_ASM from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_gcref diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -17,7 +17,7 @@ from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.resume import (PENDINGFIELDSP, ResumeDataDirectReader, AccumInfo) -from rpython.jit.metainterp.jitlog import MARK_TRACE_OPT +from rpython.rlib.jitlog import MARK_TRACE_OPT from rpython.jit.metainterp.resumecode import NUMBERING from rpython.jit.codewriter import heaptracker, longlong diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -1,351 +0,0 @@ -from rpython.rlib.rvmprof import cintf -from rpython.jit.metainterp import resoperation as resoperations -from rpython.jit.metainterp.resoperation import rop -from rpython.jit.metainterp.history import ConstInt, ConstFloat -from rpython.rlib.objectmodel import we_are_translated -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rlib.objectmodel import compute_unique_id, always_inline -from rpython.rlib import jitlog as jl -import sys -import 
weakref -import struct - -JITLOG_VERSION = 1 -JITLOG_VERSION_16BIT_LE = struct.pack("> 0) & 0xff) + chr((val >> 8) & 0xff) - - at always_inline -def encode_le_32bit(val): - return ''.join([chr((val >> 0) & 0xff), - chr((val >> 8) & 0xff), - chr((val >> 16) & 0xff), - chr((val >> 24) & 0xff)]) - - at always_inline -def encode_le_64bit(val): - return ''.join([chr((val >> 0) & 0xff), - chr((val >> 8) & 0xff), - chr((val >> 16) & 0xff), - chr((val >> 24) & 0xff), - chr((val >> 32) & 0xff), - chr((val >> 40) & 0xff), - chr((val >> 48) & 0xff), - chr((val >> 56)& 0xff)]) - - at always_inline -def encode_le_addr(val): - if IS_32_BIT: - return encode_le_32bit(val) - else: - return encode_le_64bit(val) - -def encode_type(type, value): - if type == "s": - return encode_str(value) - elif type == "q": - return encode_le_64bit(value) - elif type == "i": - return encode_le_32bit(value) - elif type == "h": - return encode_le_32bit(value) - else: - raise NotImplementedError - -def assemble_header(): - version = JITLOG_VERSION_16BIT_LE - count = len(resoperations.opname) - content = [version, chr(MARK_RESOP_META), - encode_le_16bit(count)] - for opnum, opname in resoperations.opname.items(): - content.append(encode_le_16bit(opnum)) - content.append(encode_str(opname.lower())) - return ''.join(content) - -class VMProfJitLogger(object): - def __init__(self): - self.cintf = cintf.setup() - self.memo = {} - self.trace_id = 0 - - def setup_once(self): - if self.cintf.jitlog_enabled(): - return - self.cintf.jitlog_try_init_using_env() - if not self.cintf.jitlog_enabled(): - return - blob = assemble_header() - self.cintf.jitlog_write_marked(MARK_JITLOG_HEADER, blob, len(blob)) - - def finish(self): - self.cintf.jitlog_teardown() - - def start_new_trace(self, faildescr=None, entry_bridge=False): - if not self.cintf.jitlog_enabled(): - return - content = [encode_le_addr(self.trace_id)] - if faildescr: - content.append(encode_str('bridge')) - descrnmr = compute_unique_id(faildescr) - 
content.append(encode_le_addr(descrnmr)) - else: - content.append(encode_str('loop')) - content.append(encode_le_addr(int(entry_bridge))) - self._write_marked(MARK_START_TRACE, ''.join(content)) - self.trace_id += 1 - - def _write_marked(self, mark, line): - if not we_are_translated(): - assert self.cintf.jitlog_enabled() - self.cintf.jitlog_write_marked(mark, line, len(line)) - - def log_jit_counter(self, struct): - if not self.cintf.jitlog_enabled(): - return - le_addr = encode_le_addr(struct.number) - # not an address (but a number) but it is a machine word - le_count = encode_le_addr(struct.i) - self._write_marked(MARK_JITLOG_COUNTER, le_addr + le_count) - - def log_trace(self, tag, metainterp_sd, mc, memo=None): - if not self.cintf.jitlog_enabled(): - return EMPTY_TRACE_LOG - assert isinstance(tag, int) - if memo is None: - memo = {} - return LogTrace(tag, memo, metainterp_sd, mc, self) - - def log_patch_guard(self, descr_number, addr): - if not self.cintf.jitlog_enabled(): - return - le_descr_number = encode_le_addr(descr_number) - le_addr = encode_le_addr(addr) - lst = [le_descr_number, le_addr] - self._write_marked(MARK_STITCH_BRIDGE, ''.join(lst)) - -class BaseLogTrace(object): - def write_trace(self, trace): - return None - - def write(self, args, ops, ops_offset={}): - return None - -EMPTY_TRACE_LOG = BaseLogTrace() - -class LogTrace(BaseLogTrace): - def __init__(self, tag, memo, metainterp_sd, mc, logger): - self.memo = memo - self.metainterp_sd = metainterp_sd - self.ts = None - if self.metainterp_sd is not None: - self.ts = metainterp_sd.cpu.ts - self.tag = tag - self.mc = mc - self.logger = logger - self.merge_point_file = None - - def write_trace(self, trace): - ops = [] - i = trace.get_iter() - while not i.done(): - ops.append(i.next()) - self.write(i.inputargs, ops) - - def write(self, args, ops, ops_offset={}): - log = self.logger - log._write_marked(self.tag, encode_le_addr(self.logger.trace_id)) - - # input args - str_args = 
[self.var_to_str(arg) for arg in args] - string = encode_str(','.join(str_args)) - log._write_marked(MARK_INPUT_ARGS, string) - - # assembler address (to not duplicate it in write_code_dump) - if self.mc is not None: - absaddr = self.mc.absolute_addr() - rel = self.mc.get_relative_pos() - # packs as two unsigend longs - le_addr1 = encode_le_addr(absaddr) - le_addr2 = encode_le_addr(absaddr + rel) - log._write_marked(MARK_ASM_ADDR, le_addr1 + le_addr2) - for i,op in enumerate(ops): - if rop.DEBUG_MERGE_POINT == op.getopnum(): - self.encode_debug_info(op) - continue - mark, line = self.encode_op(op) - log._write_marked(mark, line) - self.write_core_dump(ops, i, op, ops_offset) - - self.memo = {} - - def encode_once(self): - pass - - def encode_debug_info(self, op): - log = self.logger - jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] - if not jd_sd.warmstate.get_location: - return - types = jd_sd.warmstate.get_location_types - values = jd_sd.warmstate.get_location(op.getarglist()[3:]) - if values is None: - # indicates that this function is not provided to the jit driver - return - - if self.merge_point_file is None: - # first time visiting a merge point - positions = jd_sd.warmstate.get_location_positions - encoded_types = [] - for i, (semantic_type, _) in enumerate(positions): - encoded_types.append(chr(semantic_type)) - if semantic_type == jl.MP_FILENAME: - self.common_prefix = values[i] - log._write_marked(MARK_INIT_MERGE_POINT, ''.join(encoded_types)) - - - # the types have already been written - line = [] - for i,(sem_type,gen_type) in enumerate(types): - value = values[i] - if sem_type == jl.PM_FILENAME: - self.common_prefix = os.path.commonpath([self.common_prefix, value]) - log._write_marked(MARK_COMMON_PREFIX, chr(jl.PM_FILENAME) + \ - encode_str(self.common_prefix)) - line.append(encode_type(gen_type, value)) - log._write_marked(MARK_JITLOG_DEBUG_MERGE_POINT, ''.join(line)) - - - def encode_op(self, op): - """ an operation is written as 
follows: - \ - \ - ,,...,, - The marker indicates if the last argument is - a descr or a normal argument. - """ - str_args = [self.var_to_str(arg) for arg in op.getarglist()] - descr = op.getdescr() - le_opnum = encode_le_16bit(op.getopnum()) - str_res = self.var_to_str(op) - line = ','.join([str_res] + str_args) - if descr: - descr_str = descr.repr_of_descr() - line = line + ',' + descr_str - string = encode_str(line) - descr_number = compute_unique_id(descr) - le_descr_number = encode_le_addr(descr_number) - return MARK_RESOP_DESCR, le_opnum + string + le_descr_number - else: - string = encode_str(line) - return MARK_RESOP, le_opnum + string - - - def write_core_dump(self, operations, i, op, ops_offset): - if self.mc is None: - return - - op2 = None - j = i+1 - # find the next op that is in the offset hash - while j < len(operations): - op2 = operations[j] - if op in ops_offset: - break - j += 1 - - # this op has no known offset in the machine code (it might be - # a debug operation) - if op not in ops_offset: - return - # there is no well defined boundary for the end of the - # next op in the assembler - if op2 is not None and op2 not in ops_offset: - return - dump = [] - - start_offset = ops_offset[op] - assert start_offset >= 0 - # end offset is either the last pos in the assembler - # or the offset of op2 - if op2 is None: - end_offset = self.mc.get_relative_pos() - else: - end_offset = ops_offset[op2] - - count = end_offset - start_offset - dump = self.mc.copy_core_dump(self.mc.absolute_addr(), start_offset, count) - offset = encode_le_16bit(start_offset) - edump = encode_str(dump) - self.logger._write_marked(MARK_ASM, offset + edump) - - def var_to_str(self, arg): - try: - mv = self.memo[arg] - except KeyError: - mv = len(self.memo) - self.memo[arg] = mv - if isinstance(arg, ConstInt): - if self.metainterp_sd and int_could_be_an_address(arg.value): - addr = arg.getaddr() - name = self.metainterp_sd.get_name_from_address(addr) - if name: - return 
'ConstClass(' + name + ')' - return str(arg.value) - elif self.ts is not None and isinstance(arg, self.ts.ConstRef): - if arg.value: - return 'ConstPtr(ptr' + str(mv) + ')' - return 'ConstPtr(null)' - if isinstance(arg, ConstFloat): - return str(arg.getfloat()) - elif arg is None: - return 'None' - elif arg.is_vector(): - return 'v' + str(mv) - elif arg.type == 'i': - return 'i' + str(mv) - elif arg.type == 'r': - return 'p' + str(mv) - elif arg.type == 'f': - return 'f' + str(mv) - else: - return '?' - -def int_could_be_an_address(x): - if we_are_translated(): - x = rffi.cast(lltype.Signed, x) # force it - return not (-32768 <= x <= 32767) - else: - return isinstance(x, llmemory.AddressAsInt) diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -7,7 +7,7 @@ from rpython.jit.metainterp.optimizeopt.simplify import OptSimplify from rpython.jit.metainterp.optimizeopt.pure import OptPure from rpython.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce -from rpython.jit.metainterp.jitlog import MARK_TRACE +from rpython.rlib.jitlog import MARK_TRACE from rpython.rlib.jit import PARAMETERS, ENABLE_ALL_OPTS from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.debug import debug_start, debug_stop, debug_print diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -13,7 +13,7 @@ from rpython.jit.metainterp.logger import Logger from rpython.jit.metainterp.optimizeopt.util import args_dict from rpython.jit.metainterp.resoperation import rop, OpHelpers, GuardResOp -from rpython.jit.metainterp import jitlog +from rpython.rlib import jitlog as jl from rpython.rlib import nonconst, rstack from rpython.rlib.debug import debug_start, debug_stop, debug_print from rpython.rlib.debug import 
have_debug_prints, make_sure_not_resized @@ -1760,7 +1760,7 @@ self.cpu = cpu self.stats = self.cpu.stats self.options = options - self.jitlog = jitlog.VMProfJitLogger() + self.jitlog = jl.VMProfJitLogger() self.logger_noopt = Logger(self) self.logger_ops = Logger(self, guard_number=True) diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -684,14 +684,13 @@ if get_location_ptr is not None: types = self.jitdriver_sd._get_loc_types unwrap_greenkey = self.make_unwrap_greenkey() - unrolled_types = unrolling_iterable(types) + unrolled_types = unrolling_iterable(enumerate(types)) def get_location(greenkey): greenargs = unwrap_greenkey(greenkey) fn = support.maybe_on_top_of_llinterp(rtyper, get_location_ptr) value_tuple = fn(*greenargs) values = [] - i = 0 - for sem_type,gen_type in unrolled_types: + for i, (sem_type,gen_type) in unrolled_types: if gen_type == "s": value = getattr(value_tuple, 'item' + str(i)) values.append(jl.wrap(sem_type,gen_type,hlstr(value))) @@ -700,7 +699,6 @@ values.append(jl.wrap(sem_type,gen_type,intmask(value))) else: raise NotImplementedError - i += 1 return values self.get_location_types = list(types) self.get_location = get_location diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py --- a/rpython/rlib/jitlog.py +++ b/rpython/rlib/jitlog.py @@ -81,7 +81,7 @@ MP_OPCODE = (0x10, "s") class WrappedValue(object): - def encode(self, i, prefixes): + def encode(self, log, i, prefixes): raise NotImplementedError class StringValue(WrappedValue): @@ -89,30 +89,31 @@ self.value = value def encode(self, log, i, prefixes): - str_value = self.value - if len(str_value) < 5: - enc_value = encode_str(chr(0xff) + str_value) - else: - cp = commonprefix([prefixes[i], str_value]) - if cp != prefixes[i]: - if len(cp) == 0: - # they are fully different! 
- prefixes[i] = str_value - enc_value = encode_str(chr(0xff) + str_value) - else: - # the prefix changed - prefixes[i] = cp - # common prefix of field i - assert i != 0xff - log._write_marked(MARK_COMMON_PREFIX, chr(i) \ - + encode_str(cp)) - enc_value = encode_str(chr(i) + str_value) - else: - enc_value = encode_str(chr(i) + str_value) - # - if prefixes[i] is None: - prefixes[i] = str_value - return enc_value + return encode_str(self.value) + #str_value = self.value + #if len(str_value) < 5: + # enc_value = encode_str(chr(0xff) + str_value) + #else: + # cp = commonprefix([prefixes[i], str_value]) + # if cp != prefixes[i]: + # if len(cp) == 0: + # # they are fully different! + # prefixes[i] = str_value + # enc_value = encode_str(chr(0xff) + str_value) + # else: + # # the prefix changed + # prefixes[i] = cp + # # common prefix of field i + # assert i != 0xff + # log._write_marked(MARK_COMMON_PREFIX, chr(i) \ + # + encode_str(cp)) + # enc_value = encode_str(chr(i) + str_value) + # else: + # enc_value = encode_str(chr(i) + str_value) + ## + #if prefixes[i] is None: + # prefixes[i] = str_value + #return enc_value class IntValue(WrappedValue): def __init__(self, sem_type, gen_type, value): @@ -264,9 +265,6 @@ unrolled = unrolling_iterable(values) i = 0 for value in unrolled: - cp = value.encode_common_prefix(i, prefixes) - if cp is not None: - log._write_marked(MARK_COMMON_PREFIX, cp) line.append(value.encode(log,i,prefixes)) i += 1 return ''.join(line) From pypy.commits at gmail.com Mon Apr 25 03:14:28 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Apr 2016 00:14:28 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: reenabled the prefix compression and added a new test to check commonprefix Message-ID: <571dc3d4.022ec20a.6b68f.0eb7@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83848:02a45c61ac94 Date: 2016-04-25 09:13 +0200 http://bitbucket.org/pypy/pypy/changeset/02a45c61ac94/ Log: reenabled the prefix compression and added 
a new test to check commonprefix diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py --- a/rpython/rlib/jitlog.py +++ b/rpython/rlib/jitlog.py @@ -13,15 +13,19 @@ from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rlib.unroll import unrolling_iterable -def commonprefix(m): +def commonprefix(a,b): "Given a list of pathnames, returns the longest common leading component" - if not m: return '' - s1 = min(m) - s2 = max(m) - for i, c in enumerate(s1): - if c != s2[i]: - return s1[:i] - return s1 + assert a is not None + assert b is not None + la = len(a) + lb = len(b) + c = min(la,lb) + if c == 0: + return "" + for i in range(c): + if a[i] != b[i]: + return a[:i] # partly matching + return a # full match @always_inline def encode_str(string): @@ -89,31 +93,30 @@ self.value = value def encode(self, log, i, prefixes): - return encode_str(self.value) - #str_value = self.value - #if len(str_value) < 5: - # enc_value = encode_str(chr(0xff) + str_value) - #else: - # cp = commonprefix([prefixes[i], str_value]) - # if cp != prefixes[i]: - # if len(cp) == 0: - # # they are fully different! - # prefixes[i] = str_value - # enc_value = encode_str(chr(0xff) + str_value) - # else: - # # the prefix changed - # prefixes[i] = cp - # # common prefix of field i - # assert i != 0xff - # log._write_marked(MARK_COMMON_PREFIX, chr(i) \ - # + encode_str(cp)) - # enc_value = encode_str(chr(i) + str_value) - # else: - # enc_value = encode_str(chr(i) + str_value) - ## - #if prefixes[i] is None: - # prefixes[i] = str_value - #return enc_value + str_value = self.value + if len(str_value) < 5: + enc_value = encode_str(chr(0xff) + str_value) + else: + cp = commonprefix([prefixes[i], str_value]) + if cp != prefixes[i]: + if len(cp) == 0: + # they are fully different! 
+ prefixes[i] = str_value + enc_value = encode_str(chr(0xff) + str_value) + else: + # the prefix changed + prefixes[i] = cp + # common prefix of field i + assert i != 0xff + log._write_marked(MARK_COMMON_PREFIX, chr(i) \ + + encode_str(cp)) + enc_value = encode_str(chr(i) + str_value) + else: + enc_value = encode_str(chr(i) + str_value) + # + if prefixes[i] is None: + prefixes[i] = str_value + return enc_value class IntValue(WrappedValue): def __init__(self, sem_type, gen_type, value): diff --git a/rpython/rlib/test/test_jitlog.py b/rpython/rlib/test/test_jitlog.py --- a/rpython/rlib/test/test_jitlog.py +++ b/rpython/rlib/test/test_jitlog.py @@ -1,3 +1,4 @@ +import py from rpython.jit.tool.oparser import pure_parse from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.metainterp.resoperation import ResOperation, rop @@ -5,7 +6,6 @@ from rpython.jit.metainterp.history import ConstInt, ConstPtr from rpython.rlib.jitlog import (encode_str, encode_le_16bit, encode_le_64bit) from rpython.rlib import jitlog as jl -import tempfile class TestLogger(object): @@ -54,3 +54,12 @@ def test_common_prefix(self): fakelog = FakeLog() logger = jitlog.LogTrace(0x0, {}, None, None, fakelog) + + def test_common_prefix_func(self): + assert jl.commonprefix("","") == "" + assert jl.commonprefix("/hello/world","/path/to") == "/" + assert jl.commonprefix("pyramid","python") == "py" + assert jl.commonprefix("0"*100,"0"*100) == "0"*100 + with py.test.raises(AssertionError): + jl.commonprefix(None,None) + From pypy.commits at gmail.com Mon Apr 25 03:28:39 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 25 Apr 2016 00:28:39 -0700 (PDT) Subject: [pypy-commit] pypy remove-raisingops: A branch to get rid of the FAIL_xxx() in 'src/int.h' and keep Message-ID: <571dc727.8673c20a.f7427.14d4@mx.google.com> Author: Armin Rigo Branch: remove-raisingops Changeset: r83849:8211368e7e9e Date: 2016-04-25 08:56 +0200 http://bitbucket.org/pypy/pypy/changeset/8211368e7e9e/ 
Log: A branch to get rid of the FAIL_xxx() in 'src/int.h' and keep only the raisingops.py variant, which can then be more easily special- cased in the JIT From pypy.commits at gmail.com Mon Apr 25 03:28:41 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 25 Apr 2016 00:28:41 -0700 (PDT) Subject: [pypy-commit] pypy remove-raisingops: Goal: kill this amount of code. Now I have to make things work again :-) Message-ID: <571dc729.0b1f1c0a.963b3.ffffa037@mx.google.com> Author: Armin Rigo Branch: remove-raisingops Changeset: r83850:7b1fbbe1f814 Date: 2016-04-25 09:24 +0200 http://bitbucket.org/pypy/pypy/changeset/7b1fbbe1f814/ Log: Goal: kill this amount of code. Now I have to make things work again :-) diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -213,11 +213,6 @@ default=False), BoolOption("merge_if_blocks", "Merge if ... elif chains", cmdline="--if-block-merge", default=True), - BoolOption("raisingop2direct_call", - "Transform operations that can implicitly raise an " - "exception into calls to functions that explicitly " - "raise exceptions", - default=False, cmdline="--raisingop2direct_call"), BoolOption("mallocs", "Remove mallocs", default=True), BoolOption("constfold", "Constant propagation", default=True), diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -166,8 +166,6 @@ # ____________________________________________________________ # # This list corresponds to the operations implemented by the LLInterpreter. -# Note that many exception-raising operations can be replaced by calls -# to helper functions in rpython.rtyper.raisingops. # ***** Run test_lloperation after changes. 
***** LL_OPERATIONS = { @@ -191,18 +189,14 @@ 'int_is_true': LLOp(canfold=True), 'int_neg': LLOp(canfold=True), - 'int_neg_ovf': LLOp(canraise=(OverflowError,), tryfold=True), 'int_abs': LLOp(canfold=True), - 'int_abs_ovf': LLOp(canraise=(OverflowError,), tryfold=True), 'int_invert': LLOp(canfold=True), 'int_add': LLOp(canfold=True), 'int_sub': LLOp(canfold=True), 'int_mul': LLOp(canfold=True), - 'int_floordiv': LLOp(canfold=True), - 'int_floordiv_zer': LLOp(canraise=(ZeroDivisionError,), tryfold=True), - 'int_mod': LLOp(canfold=True), - 'int_mod_zer': LLOp(canraise=(ZeroDivisionError,), tryfold=True), + 'int_floordiv': LLOp(canfold=True), # C-like behavior for neg num + 'int_mod': LLOp(canfold=True), # C-like behavior for neg num 'int_lt': LLOp(canfold=True), 'int_le': LLOp(canfold=True), 'int_eq': LLOp(canfold=True), @@ -218,20 +212,6 @@ 'int_between': LLOp(canfold=True), # a <= b < c 'int_force_ge_zero': LLOp(canfold=True), # 0 if a < 0 else a - 'int_add_ovf': LLOp(canraise=(OverflowError,), tryfold=True), - 'int_add_nonneg_ovf': LLOp(canraise=(OverflowError,), tryfold=True), - # ^^^ more efficient version when 2nd arg is nonneg - 'int_sub_ovf': LLOp(canraise=(OverflowError,), tryfold=True), - 'int_mul_ovf': LLOp(canraise=(OverflowError,), tryfold=True), - # the following operations overflow in one case: (-sys.maxint-1) // (-1) - 'int_floordiv_ovf': LLOp(canraise=(OverflowError,), tryfold=True), - 'int_floordiv_ovf_zer': LLOp(canraise=(OverflowError, ZeroDivisionError), - tryfold=True), - 'int_mod_ovf': LLOp(canraise=(OverflowError,), tryfold=True), - 'int_mod_ovf_zer': LLOp(canraise=(OverflowError, ZeroDivisionError), - tryfold=True), - 'int_lshift_ovf': LLOp(canraise=(OverflowError,), tryfold=True), - 'uint_is_true': LLOp(canfold=True), 'uint_invert': LLOp(canfold=True), @@ -239,9 +219,7 @@ 'uint_sub': LLOp(canfold=True), 'uint_mul': LLOp(canfold=True), 'uint_floordiv': LLOp(canfold=True), - 'uint_floordiv_zer': LLOp(canraise=(ZeroDivisionError,), 
tryfold=True), 'uint_mod': LLOp(canfold=True), - 'uint_mod_zer': LLOp(canraise=(ZeroDivisionError,), tryfold=True), 'uint_lt': LLOp(canfold=True), 'uint_le': LLOp(canfold=True), 'uint_eq': LLOp(canfold=True), @@ -280,9 +258,7 @@ 'llong_sub': LLOp(canfold=True), 'llong_mul': LLOp(canfold=True), 'llong_floordiv': LLOp(canfold=True), - 'llong_floordiv_zer': LLOp(canraise=(ZeroDivisionError,), tryfold=True), 'llong_mod': LLOp(canfold=True), - 'llong_mod_zer': LLOp(canraise=(ZeroDivisionError,), tryfold=True), 'llong_lt': LLOp(canfold=True), 'llong_le': LLOp(canfold=True), 'llong_eq': LLOp(canfold=True), @@ -302,9 +278,7 @@ 'ullong_sub': LLOp(canfold=True), 'ullong_mul': LLOp(canfold=True), 'ullong_floordiv': LLOp(canfold=True), - 'ullong_floordiv_zer': LLOp(canraise=(ZeroDivisionError,), tryfold=True), 'ullong_mod': LLOp(canfold=True), - 'ullong_mod_zer': LLOp(canraise=(ZeroDivisionError,), tryfold=True), 'ullong_lt': LLOp(canfold=True), 'ullong_le': LLOp(canfold=True), 'ullong_eq': LLOp(canfold=True), @@ -326,9 +300,7 @@ 'lllong_sub': LLOp(canfold=True), 'lllong_mul': LLOp(canfold=True), 'lllong_floordiv': LLOp(canfold=True), - 'lllong_floordiv_zer': LLOp(canraise=(ZeroDivisionError,), tryfold=True), 'lllong_mod': LLOp(canfold=True), - 'lllong_mod_zer': LLOp(canraise=(ZeroDivisionError,), tryfold=True), 'lllong_lt': LLOp(canfold=True), 'lllong_le': LLOp(canfold=True), 'lllong_eq': LLOp(canfold=True), diff --git a/rpython/translator/backendopt/all.py b/rpython/translator/backendopt/all.py --- a/rpython/translator/backendopt/all.py +++ b/rpython/translator/backendopt/all.py @@ -1,4 +1,3 @@ -from rpython.translator.backendopt.raisingop2direct_call import raisingop2direct_call from rpython.translator.backendopt import removenoops from rpython.translator.backendopt import inline from rpython.translator.backendopt.malloc import remove_mallocs @@ -34,7 +33,7 @@ def backend_optimizations(translator, graphs=None, secondary=False, inline_graph_from_anywhere=False, **kwds): # 
sensible keywords are - # raisingop2direct_call, inline_threshold, mallocs + # inline_threshold, mallocs # merge_if_blocks, constfold, heap2stack # clever_malloc_removal, remove_asserts @@ -50,9 +49,6 @@ print "before optimizations:" print_statistics(translator.graphs[0], translator, "per-graph.txt") - if config.raisingop2direct_call: - raisingop2direct_call(translator, graphs) - if config.remove_asserts: constfold(config, graphs) remove_asserts(translator, graphs) diff --git a/rpython/translator/backendopt/raisingop2direct_call.py b/rpython/translator/backendopt/raisingop2direct_call.py deleted file mode 100644 --- a/rpython/translator/backendopt/raisingop2direct_call.py +++ /dev/null @@ -1,64 +0,0 @@ -from rpython.translator.backendopt.support import log, all_operations, annotate -import rpython.rtyper.raisingops - - -log = log.raisingop2directcall - -def is_raisingop(op): - s = op.opname - if (not s.startswith('int_') and not s.startswith('uint_') and - not s.startswith('float_') and not s.startswith('llong_')): - return False - if not s.endswith('_zer') and not s.endswith('_ovf') and not s.endswith('_val'): #not s in special_operations: - return False - return True - -def raisingop2direct_call(translator, graphs=None): - """search for operations that could raise an exception and change that - operation into a direct_call to a function from the raisingops module. - This function also needs to be annotated and specialized. - - note: this could be extended to allow for any operation to be changed into - a direct_call to a (RPython) function! 
- """ - #special_operations = "int_floordiv int_mod".split() - if graphs is None: - graphs = translator.graphs - - log('starting') - seen = {} - for op in all_operations(graphs): - if not is_raisingop(op): - continue - func = getattr(rpython.rtyper.raisingops, op.opname, None) - if not func: - log.warning("%s not found" % op.opname) - continue - if op.opname not in seen: - seen[op.opname] = 0 - seen[op.opname] += 1 - op.args.insert(0, annotate(translator, func, op.result, op.args)) - op.opname = 'direct_call' - - #statistics... - for k, v in seen.iteritems(): - log("%dx %s" % (v, k)) - - #specialize newly annotated functions - if seen != {}: - translator.rtyper.specialize_more_blocks() - - #rename some operations (that were introduced in the newly specialized graphs) - #so this transformation becomes idempotent... - #for op in all_operations(graphs): - # if op.opname in special_operations: - # log('renamed %s to %s_' % (op.opname, op.opname)) - # op.opname += '_' - - #selfdiagnostics... assert that there are no more raisingops - for op in all_operations(graphs): - if is_raisingop(op): - log.warning("%s not transformed" % op.opname) - - #translator.view() - log('finished') diff --git a/rpython/translator/backendopt/test/test_all.py b/rpython/translator/backendopt/test/test_all.py --- a/rpython/translator/backendopt/test/test_all.py +++ b/rpython/translator/backendopt/test/test_all.py @@ -135,10 +135,8 @@ return 33 + big() + g(10) t = self.translateopt(idempotent, [int, int], - raisingop2direct_call=True, constfold=False) - #backend_optimizations(t, raisingop2direct_call=True, - # inline_threshold=0, constfold=False) + #backend_optimizations(t, inline_threshold=0, constfold=False) digest1 = md5digest(t) @@ -155,8 +153,7 @@ #XXX Inlining and constfold are currently non-idempotent. # Maybe they just renames variables but the graph changes in some way. 
- backend_optimizations(t, raisingop2direct_call=True, - inline_threshold=0, constfold=False) + backend_optimizations(t, inline_threshold=0, constfold=False) digest3 = md5digest(t) compare(digest1, digest3) diff --git a/rpython/translator/backendopt/test/test_raisingop2direct_call.py b/rpython/translator/backendopt/test/test_raisingop2direct_call.py deleted file mode 100644 --- a/rpython/translator/backendopt/test/test_raisingop2direct_call.py +++ /dev/null @@ -1,107 +0,0 @@ -from rpython.translator.backendopt import raisingop2direct_call, support -from rpython.rtyper.test.test_llinterp import get_interpreter -from rpython.rlib.rarithmetic import ovfcheck - -import sys - -import py - - -def get_runner(f, exceptedop, types): - values = [t() for t in types] - interp, graph = get_interpreter(f, values) - for op in support.graph_operations(graph): - if op.opname == exceptedop: - break - else: - assert False, "op %r not found!"%(exceptedop,) - t = interp.typer.annotator.translator # FIIISH! - raisingop2direct_call.raisingop2direct_call(t, [graph]) - def ret(*args): - assert map(type, args) == types - return interp.eval_graph(graph, args) - return ret - -def test_test_machinery(): - def f(x, y): - try: - return x + y - except OverflowError: - return 123 - py.test.raises(AssertionError, "get_runner(f, 'int_add_ovf', [int, int])") - def f(x, y): - try: - return ovfcheck(x + y) - except OverflowError: - return 123 - fn = get_runner(f, 'int_add_ovf', [int, int]) - res = fn(0, 0) - assert res == 0 - - -def test_division(): - def f(x, y): - try: - return x//y - except ZeroDivisionError: - return 123 - fn = get_runner(f, 'int_floordiv_zer', [int, int]) - res = fn(1, 0) - assert res == 123 - res = fn(-5, 2) - assert res == -3 - - def h(x, y): - try: - return ovfcheck(x//y) - except OverflowError: - return 123 - except ZeroDivisionError: - return 246 - hn = get_runner(h, 'int_floordiv_ovf_zer', [int, int]) - res = hn(-sys.maxint-1, -1) - assert res == 123 - res = hn(1, 0) - 
assert res == 246 - res = hn(-5, 2) - assert res == -3 - -def test_modulo(): - def f(x, y): - try: - return x%y - except ZeroDivisionError: - return 123 - fn = get_runner(f, 'int_mod_zer', [int, int]) - res = fn(0, 0) - assert res == 123 - res = fn(-5, 2) - assert res == 1 - - - # this becomes an int_mod_ovf_zer already? -## def g(x, y): -## try: -## return ovfcheck(x%y) -## except OverflowError: -## return 123 -## gn = get_runner(g, 'int_mod_ovf', [int, int]) -## res = gn(-sys.maxint-1, -1) -## assert res == 123 -## res = gn(-5, 2) -## assert res == -3 - - def h(x, y): - try: - return ovfcheck(x%y) - except OverflowError: - return 123 - except ZeroDivisionError: - return 246 - hn = get_runner(h, 'int_mod_ovf_zer', [int, int]) - res = hn(-sys.maxint-1, -1) - assert res == 123 - res = hn(1, 0) - assert res == 246 - res = hn(-5, 2) - assert res == 1 diff --git a/rpython/translator/backendopt/test/test_removenoops.py b/rpython/translator/backendopt/test/test_removenoops.py --- a/rpython/translator/backendopt/test/test_removenoops.py +++ b/rpython/translator/backendopt/test/test_removenoops.py @@ -19,8 +19,7 @@ t.buildrtyper().specialize() if all_opts: backend_optimizations(t, inline_threshold=INLINE_THRESHOLD_FOR_TEST, - constfold=False, - raisingop2direct_call=False) + constfold=False) graph = graphof(t, fn) if option.view: t.view() diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -800,7 +800,6 @@ srcdir / 'debug_traceback.c', # ifdef HAVE_RTYPER srcdir / 'asm.c', srcdir / 'instrument.c', - srcdir / 'int.c', srcdir / 'stack.c', srcdir / 'threadlocal.c', ] diff --git a/rpython/translator/c/src/asm_gcc_x86.c b/rpython/translator/c/src/asm_gcc_x86.c --- a/rpython/translator/c/src/asm_gcc_x86.c +++ b/rpython/translator/c/src/asm_gcc_x86.c @@ -5,12 +5,6 @@ #include #include -# if 0 /* disabled */ -void op_int_overflowed(void) -{ - FAIL_OVF("integer operation"); -} -# endif # 
ifdef PYPY_X86_CHECK_SSE2 void pypy_x86_check_sse2(void) diff --git a/rpython/translator/c/src/exception.c b/rpython/translator/c/src/exception.c --- a/rpython/translator/c/src/exception.c +++ b/rpython/translator/c/src/exception.c @@ -32,14 +32,6 @@ RPyClearException(); \ } while (0) -/* implementations */ - -void _RPyRaiseSimpleException(RPYTHON_EXCEPTION rexc) -{ - /* XXX msg is ignored */ - RPyRaiseException(RPYTHON_TYPE_OF_EXC_INST(rexc), rexc); -} - /******************************************************************/ #endif /* HAVE_RTYPER */ diff --git a/rpython/translator/c/src/exception.h b/rpython/translator/c/src/exception.h --- a/rpython/translator/c/src/exception.h +++ b/rpython/translator/c/src/exception.h @@ -35,9 +35,4 @@ RPyClearException(); \ } while (0) -/* prototypes */ - -RPY_EXTERN -void _RPyRaiseSimpleException(RPYTHON_EXCEPTION rexc); - #endif diff --git a/rpython/translator/c/src/int.c b/rpython/translator/c/src/int.c deleted file mode 100644 --- a/rpython/translator/c/src/int.c +++ /dev/null @@ -1,45 +0,0 @@ -#include "common_header.h" -#include "structdef.h" -#include "forwarddecl.h" -#include "preimpl.h" -#include -#include -#include - -/* adjusted from intobject.c, Python 2.3.3 */ - -long long op_llong_mul_ovf(long long a, long long b) -{ - double doubled_longprod; /* (double)longprod */ - double doubleprod; /* (double)a * (double)b */ - long long longprod; - - longprod = a * b; - doubleprod = (double)a * (double)b; - doubled_longprod = (double)longprod; - - /* Fast path for normal case: small multiplicands, and no info - is lost in either method. */ - if (doubled_longprod == doubleprod) - return longprod; - - /* Somebody somewhere lost info. Close enough, or way off? Note - that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0). - The difference either is or isn't significant compared to the - true value (of which doubleprod is a good approximation). 
- */ - { - const double diff = doubled_longprod - doubleprod; - const double absdiff = diff >= 0.0 ? diff : -diff; - const double absprod = doubleprod >= 0.0 ? doubleprod : - -doubleprod; - /* absdiff/absprod <= 1/32 iff - 32 * absdiff <= absprod -- 5 good bits is "close enough" */ - if (32.0 * absdiff <= absprod) - return longprod; - - FAIL_OVF("integer multiplication"); - return -1; - } -} - diff --git a/rpython/translator/c/src/int.h b/rpython/translator/c/src/int.h --- a/rpython/translator/c/src/int.h +++ b/rpython/translator/c/src/int.h @@ -21,16 +21,8 @@ #define OP_INT_INVERT(x,r) r = ~(x) #define OP_INT_NEG(x,r) r = -(x) -#define OP_INT_NEG_OVF(x,r) \ - if ((x) == SIGNED_MIN) FAIL_OVF("integer negate"); \ - OP_INT_NEG(x,r) - #define OP_INT_ABS(x,r) r = (x) >= 0 ? x : -(x) -#define OP_INT_ABS_OVF(x,r) \ - if ((x) == SIGNED_MIN) FAIL_OVF("integer absolute"); \ - OP_INT_ABS(x,r) - /*** binary operations ***/ #define OP_INT_EQ(x,y,r) r = ((x) == (y)) @@ -53,36 +45,9 @@ /* addition, subtraction */ #define OP_INT_ADD(x,y,r) r = (x) + (y) - -/* cast to avoid undefined behaviour on overflow */ -#define OP_INT_ADD_OVF(x,y,r) \ - r = (Signed)((Unsigned)x + y); \ - if ((r^x) < 0 && (r^y) < 0) FAIL_OVF("integer addition") - -#define OP_INT_ADD_NONNEG_OVF(x,y,r) /* y can be assumed >= 0 */ \ - r = (Signed)((Unsigned)x + y); \ - if ((r&~x) < 0) FAIL_OVF("integer addition") - #define OP_INT_SUB(x,y,r) r = (x) - (y) - -#define OP_INT_SUB_OVF(x,y,r) \ - r = (Signed)((Unsigned)x - y); \ - if ((r^x) < 0 && (r^~y) < 0) FAIL_OVF("integer subtraction") - #define OP_INT_MUL(x,y,r) r = (x) * (y) -#if SIZEOF_LONG * 2 <= SIZEOF_LONG_LONG && !defined(_WIN64) -#define OP_INT_MUL_OVF(x,y,r) \ - { \ - long long _lr = (long long)x * y; \ - r = (long)_lr; \ - if (_lr != (long long)r) FAIL_OVF("integer multiplication"); \ - } -#else -#define OP_INT_MUL_OVF(x,y,r) \ - r = op_llong_mul_ovf(x, y) /* long == long long */ -#endif - /* shifting */ /* NB. 
shifting has same limitations as C: the shift count must be @@ -111,11 +76,6 @@ #define OP_ULLONG_LSHIFT(x,y,r) CHECK_SHIFT_RANGE(y, PYPY_LONGLONG_BIT); \ r = (x) << (y) -#define OP_INT_LSHIFT_OVF(x,y,r) \ - OP_INT_LSHIFT(x,y,r); \ - if ((x) != Py_ARITHMETIC_RIGHT_SHIFT(Signed, r, (y))) \ - FAIL_OVF("x< Author: Richard Plangger Branch: new-jit-log Changeset: r83851:44a816042aee Date: 2016-04-25 10:17 +0200 http://bitbucket.org/pypy/pypy/changeset/44a816042aee/ Log: more tests stressing the prefix compression in the log diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py --- a/rpython/rlib/jitlog.py +++ b/rpython/rlib/jitlog.py @@ -85,38 +85,30 @@ MP_OPCODE = (0x10, "s") class WrappedValue(object): - def encode(self, log, i, prefixes): + def encode(self, log, i, compressor): raise NotImplementedError class StringValue(WrappedValue): def __init__(self, sem_type, gen_type, value): self.value = value - def encode(self, log, i, prefixes): + def encode(self, log, i, compressor): str_value = self.value - if len(str_value) < 5: - enc_value = encode_str(chr(0xff) + str_value) + last_prefix = compressor.get_last_written(i) + cp = compressor.compress(i, str_value) + if cp is None: + return chr(0xff) + encode_str(str_value) + else: - cp = commonprefix([prefixes[i], str_value]) - if cp != prefixes[i]: - if len(cp) == 0: - # they are fully different! 
- prefixes[i] = str_value - enc_value = encode_str(chr(0xff) + str_value) - else: - # the prefix changed - prefixes[i] = cp - # common prefix of field i - assert i != 0xff - log._write_marked(MARK_COMMON_PREFIX, chr(i) \ - + encode_str(cp)) - enc_value = encode_str(chr(i) + str_value) + cp_len = len(cp) + if cp == last_prefix: + # we have the same prefix + pass else: - enc_value = encode_str(chr(i) + str_value) - # - if prefixes[i] is None: - prefixes[i] = str_value - return enc_value + compressor.write(log, i, cp) + if len(str_value) == len(cp): + return "\xef" + return chr(i) + encode_str(str_value[len(cp):]) class IntValue(WrappedValue): def __init__(self, sem_type, gen_type, value): @@ -263,12 +255,42 @@ EMPTY_TRACE_LOG = BaseLogTrace() -def encode_merge_point(log, prefixes, values): +class PrefixCompressor(object): + def __init__(self, count): + self.prefixes = [None] * count + self.written_prefixes = [None] * count + + def get_last(self, index): + return self.prefixes[index] + + def get_last_written(self, index): + return self.written_prefixes[index] + + def compress(self, index, string): + assert string is not None + last = self.get_last(index) + if last is None: + self.prefixes[index] = string + return None + cp = commonprefix(last, string) + if len(cp) <= 1: # prevent common prefix '/' + self.prefixes[index] = string + return None + return cp + + + def write(self, log, index, prefix): + # we have a new prefix + log._write_marked(MARK_COMMON_PREFIX, chr(index) \ + + encode_str(prefix)) + self.written_prefixes[index] = prefix + +def encode_merge_point(log, compressor, values): line = [] unrolled = unrolling_iterable(values) i = 0 for value in unrolled: - line.append(value.encode(log,i,prefixes)) + line.append(value.encode(log,i,compressor)) i += 1 return ''.join(line) @@ -339,10 +361,11 @@ if self.common_prefix is None: # first time visiting a merge point # setup the common prefix - self.common_prefix = [""] * len(types) + self.common_prefix = 
PrefixCompressor(len(types)) encoded_types = [] - for i, (semantic_type, _) in enumerate(types): + for i, (semantic_type, generic_type) in enumerate(types): encoded_types.append(chr(semantic_type)) + encoded_types.append(chr(generic_type)) log._write_marked(MARK_INIT_MERGE_POINT, ''.join(encoded_types)) # the types have already been written diff --git a/rpython/rlib/test/test_jitlog.py b/rpython/rlib/test/test_jitlog.py --- a/rpython/rlib/test/test_jitlog.py +++ b/rpython/rlib/test/test_jitlog.py @@ -7,6 +7,13 @@ from rpython.rlib.jitlog import (encode_str, encode_le_16bit, encode_le_64bit) from rpython.rlib import jitlog as jl +class FakeLog(object): + def __init__(self): + self.values = [] + + def _write_marked(self, id, text): + self.values.append(chr(id) + text) + class TestLogger(object): def make_metainterp_sd(self): @@ -27,7 +34,7 @@ return FakeMetaInterpSd() def test_debug_merge_point(self, tmpdir): - logger = jitlog.VMProfJitLogger() + logger = jl.VMProfJitLogger() file = tmpdir.join('binary_file') file.ensure() fd = file.open('wb') @@ -47,13 +54,37 @@ encode_le_64bit(99) + \ encode_str('DEL')) - class FakeLog(object): - def _write_marked(self, id, text): - pass - def test_common_prefix(self): fakelog = FakeLog() - logger = jitlog.LogTrace(0x0, {}, None, None, fakelog) + compressor = jl.PrefixCompressor(1) + # nothing to compress yet! 
+ result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','hello')]) + assert result == b"\xff\x05\x00\x00\x00hello" + assert fakelog.values == [] + # + result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','hello')]) + assert result == b"\xef" + assert fakelog.values == ["\x25\x00\x05\x00\x00\x00hello"] + # + fakelog.values = [] + result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','heiter')]) + assert result == b"\x00\x04\x00\x00\x00iter" + assert fakelog.values == ["\x25\x00\x02\x00\x00\x00he"] + # + fakelog.values = [] + result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','heute')]) + assert result == b"\x00\x03\x00\x00\x00ute" + assert fakelog.values == [] + # + fakelog.values = [] + result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','welt')]) + assert result == b"\xff\x04\x00\x00\x00welt" + assert fakelog.values == [] + # + fakelog.values = [] + result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','welle')]) + assert result == b"\x00\x02\x00\x00\x00le" + assert fakelog.values == ["\x25\x00\x03\x00\x00\x00wel"] def test_common_prefix_func(self): assert jl.commonprefix("","") == "" From pypy.commits at gmail.com Mon Apr 25 04:24:21 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 25 Apr 2016 01:24:21 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: Remove 'sharesmallstr' Message-ID: <571dd435.50301c0a.b22c8.ffff84b5@mx.google.com> Author: Armin Rigo Branch: remove-objspace-options Changeset: r83852:d905304eb5a8 Date: 2016-04-25 10:24 +0200 http://bitbucket.org/pypy/pypy/changeset/d905304eb5a8/ Log: Remove 'sharesmallstr' diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -204,11 +204,6 @@ BoolOption("withstrbuf", "use strings optimized for addition (ver 2)", default=False), - BoolOption("sharesmallstr", - "always reuse 
the prebuilt string objects " - "(the empty string and potentially single-char strings)", - default=False), - BoolOption("withspecialisedtuple", "use specialised tuples", default=False), @@ -272,7 +267,6 @@ if level == 'mem': config.objspace.std.suggest(withprebuiltint=True) config.objspace.std.suggest(withliststrategies=True) - config.objspace.std.suggest(sharesmallstr=True) if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -841,29 +841,12 @@ return [s for s in value] W_BytesObject.EMPTY = W_BytesObject('') -W_BytesObject.PREBUILT = [W_BytesObject(chr(i)) for i in range(256)] -del i def wrapstr(space, s): - if space.config.objspace.std.sharesmallstr: - # share characters and empty string - if len(s) <= 1: - if len(s) == 0: - return W_BytesObject.EMPTY - else: - s = s[0] # annotator hint: a single char - return wrapchar(space, s) return W_BytesObject(s) -def wrapchar(space, c): - if space.config.objspace.std.sharesmallstr and not we_are_jitted(): - return W_BytesObject.PREBUILT[ord(c)] - else: - return W_BytesObject(c) - - W_BytesObject.typedef = TypeDef( "str", basestring_typedef, __new__ = interp2app(W_BytesObject.descr_new), diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -795,6 +795,3 @@ return 42 x = Foo() assert "hello" + x == 42 - -class AppTestShare(AppTestBytesObject): - spaceconfig = {"objspace.std.sharesmallstr": True} From pypy.commits at gmail.com Mon Apr 25 05:26:14 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 25 Apr 2016 02:26:14 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: cleanups Message-ID: <571de2b6.c9921c0a.f41ad.ffffa4dc@mx.google.com> Author: Armin Rigo Branch: remove-objspace-options 
Changeset: r83853:9f7ebf38a251 Date: 2016-04-25 11:26 +0200 http://bitbucket.org/pypy/pypy/changeset/9f7ebf38a251/ Log: cleanups diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -52,7 +52,7 @@ _, w_descr = w_type._lookup_where(name) w_descr_cell = None else: - _, w_descr_cell = w_type._pure_lookup_where_possibly_with_method_cache( + _, w_descr_cell = w_type._pure_lookup_where_with_method_cache( name, version_tag) w_descr = w_descr_cell if isinstance(w_descr, MutableCell): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -69,7 +69,7 @@ elif instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) - elif instance or strdict or module: + elif strdict or module: assert w_type is None strategy = space.fromcache(BytesDictStrategy) elif kwargs: diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -977,7 +977,7 @@ name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is # a MutableCell, which may change without changing the version_tag - _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( + _, w_descr = w_type._pure_lookup_where_with_method_cache( name, version_tag) # attrname, index = ("", INVALID) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -432,6 +432,8 @@ class AppTestListObject(object): + spaceconfig = {"objspace.std.withliststrategies": True} # it's the default + def setup_class(cls): import platform import sys diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- 
a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1,3 +1,4 @@ +import weakref from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root, SpaceCache from pypy.interpreter.error import oefmt, OperationError @@ -9,6 +10,7 @@ from rpython.rlib.jit import (promote, elidable_promote, we_are_jitted, elidable, dont_look_inside, unroll_safe) from rpython.rlib.objectmodel import current_object_addr_as_int, compute_hash +from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint class MutableCell(W_Root): @@ -85,6 +87,10 @@ for i in range(len(self.lookup_where)): self.lookup_where[i] = None_None +class _Global(object): + weakref_warning_printed = False +_global = _Global() + class Layout(object): """A Layout is attached to every W_TypeObject to represent the @@ -372,6 +378,7 @@ @unroll_safe def _lookup(w_self, key): + # nowadays, only called from ../../tool/ann_override.py space = w_self.space for w_class in w_self.mro_w: w_value = w_class.getdictvalue(space, key) @@ -381,7 +388,7 @@ @unroll_safe def _lookup_where(w_self, key): - # like lookup() but also returns the parent class in which the + # like _lookup() but also returns the parent class in which the # attribute was found space = w_self.space for w_class in w_self.mro_w: @@ -415,9 +422,6 @@ return w_class, w_value.unwrap_cell(space) return tup_w # don't make a new tuple, reuse the old one - def _pure_lookup_where_possibly_with_method_cache(w_self, name, version_tag): - return w_self._pure_lookup_where_with_method_cache(name, version_tag) - @elidable def _pure_lookup_where_with_method_cache(w_self, name, version_tag): space = w_self.space @@ -535,9 +539,15 @@ def add_subclass(w_self, w_subclass): space = w_self.space if not space.config.translation.rweakref: - w_self.weak_subclasses.append(w_subclass) # not really weak, but well - return - import weakref + # We don't have weakrefs! 
In this case, every class stores + # subclasses in a non-weak list. ALL CLASSES LEAK! To make + # the user aware of this annoying fact, print a warning. + if we_are_translated() and not _global.weakref_warning_printed: + from rpython.rlib import debug + debug.debug_print("Warning: no weakref support in this PyPy. " + "All user-defined classes will leak!") + _global.weakref_warning_printed = True + assert isinstance(w_subclass, W_TypeObject) newref = weakref.ref(w_subclass) for i in range(len(w_self.weak_subclasses)): @@ -550,13 +560,6 @@ def remove_subclass(w_self, w_subclass): space = w_self.space - if not space.config.translation.rweakref: - for i in range(len(w_self.weak_subclasses)): - w_cls = w_self.weak_subclasses[i] - if w_cls is w_subclass: - del w_self.weak_subclasses[i] - return - return for i in range(len(w_self.weak_subclasses)): ref = w_self.weak_subclasses[i] if ref() is w_subclass: @@ -565,8 +568,6 @@ def get_subclasses(w_self): space = w_self.space - if not space.config.translation.rweakref: - return w_self.weak_subclasses[:] subclasses_w = [] for ref in w_self.weak_subclasses: w_ob = ref() diff --git a/rpython/rtyper/lltypesystem/rbuilder.py b/rpython/rtyper/lltypesystem/rbuilder.py --- a/rpython/rtyper/lltypesystem/rbuilder.py +++ b/rpython/rtyper/lltypesystem/rbuilder.py @@ -1,5 +1,5 @@ from rpython.rlib import rgc, jit -from rpython.rlib.objectmodel import enforceargs +from rpython.rlib.objectmodel import enforceargs, dont_inline, always_inline from rpython.rlib.rarithmetic import ovfcheck, r_uint, intmask from rpython.rtyper.debug import ll_assert from rpython.rlib.unroll import unrolling_iterable @@ -37,15 +37,6 @@ # ------------------------------------------------------------ -def dont_inline(func): - func._dont_inline_ = True - return func - -def always_inline(func): - func._always_inline_ = True - return func - - STRINGPIECE = lltype.GcStruct('stringpiece', ('buf', lltype.Ptr(STR)), ('prev_piece', lltype.Ptr(lltype.GcForwardReference()))) 
From pypy.commits at gmail.com Mon Apr 25 06:06:09 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 25 Apr 2016 03:06:09 -0700 (PDT) Subject: [pypy-commit] pypy default: More test fixes after 490058ea54e6 Message-ID: <571dec11.22c8c20a.9fab7.5544@mx.google.com> Author: Armin Rigo Branch: Changeset: r83854:eb8a73f89d3d Date: 2016-04-25 12:06 +0200 http://bitbucket.org/pypy/pypy/changeset/eb8a73f89d3d/ Log: More test fixes after 490058ea54e6 diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -617,7 +617,7 @@ oldhdr = self.gc.header(llmemory.cast_ptr_to_adr(oldobj)) assert oldhdr.tid & incminimark.GCFLAG_VISITED == 0 - self.gc.minor_collection() + self.gc._minor_collection() self.gc.visit_all_objects_step(1) assert oldhdr.tid & incminimark.GCFLAG_VISITED @@ -628,7 +628,7 @@ assert self.gc.header(self.gc.old_objects_pointing_to_young.tolist()[0]) == oldhdr - self.gc.minor_collection() + self.gc._minor_collection() self.gc.debug_check_consistency() def test_sweeping_simple(self): diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -22,7 +22,7 @@ if major: self.gc.collect() else: - self.gc.minor_collection() + self.gc._minor_collection() count1 = len(self.trigger) self.gc.rrc_invoke_callback() count2 = len(self.trigger) From pypy.commits at gmail.com Mon Apr 25 06:33:05 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 25 Apr 2016 03:33:05 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: Now, if an RPython program uses weakrefs when translation.rweakref is Message-ID: <571df261.47afc20a.2f58b.6bdb@mx.google.com> Author: Armin Rigo Branch: remove-objspace-options Changeset: r83855:0713002caf76 Date: 2016-04-25 12:31 +0200 
http://bitbucket.org/pypy/pypy/changeset/0713002caf76/ Log: Now, if an RPython program uses weakrefs when translation.rweakref is False, we don't get a translation crash; instead we get non-weak references. diff --git a/rpython/memory/gctransform/boehm.py b/rpython/memory/gctransform/boehm.py --- a/rpython/memory/gctransform/boehm.py +++ b/rpython/memory/gctransform/boehm.py @@ -46,11 +46,12 @@ ll_malloc_varsize_no_length, [lltype.Signed]*3, llmemory.Address, inline=False) self.malloc_varsize_ptr = self.inittime_helper( ll_malloc_varsize, [lltype.Signed]*4, llmemory.Address, inline=False) - self.weakref_create_ptr = self.inittime_helper( - ll_weakref_create, [llmemory.Address], llmemory.WeakRefPtr, - inline=False) - self.weakref_deref_ptr = self.inittime_helper( - ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address) + if self.translator.config.translation.rweakref: + self.weakref_create_ptr = self.inittime_helper( + ll_weakref_create, [llmemory.Address], llmemory.WeakRefPtr, + inline=False) + self.weakref_deref_ptr = self.inittime_helper( + ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address) self.identityhash_ptr = self.inittime_helper( ll_identityhash, [llmemory.Address], lltype.Signed, inline=False) diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -236,8 +236,9 @@ annmodel.s_None) self.annotate_walker_functions(getfn) - self.weakref_deref_ptr = self.inittime_helper( - ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address) + if translator.config.translation.rweakref: + self.weakref_deref_ptr = self.inittime_helper( + ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address) classdef = bk.getuniqueclassdef(GCClass) s_gc = annmodel.SomeInstance(classdef) diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -7,7 +7,14 @@ import weakref 
from rpython.annotator.model import UnionError -ref = weakref.ref # basic regular weakrefs are supported in RPython + +# Basic regular weakrefs are supported in RPython. +# Note that if 'translation.rweakref' is False, they will +# still work, but be implemented as a strong reference. +# This case is useful for developing new GCs, for example. + +ref = weakref.ref + def has_weakref_support(): return True # returns False if --no-translation-rweakref diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -782,17 +782,21 @@ def op_weakref_create(self, v_obj): def objgetter(): # special support for gcwrapper.py return self.getval(v_obj) + assert self.llinterpreter.typer.getconfig().translation.rweakref return self.heap.weakref_create_getlazy(objgetter) op_weakref_create.specialform = True def op_weakref_deref(self, PTRTYPE, obj): + assert self.llinterpreter.typer.getconfig().translation.rweakref return self.heap.weakref_deref(PTRTYPE, obj) op_weakref_deref.need_result_type = True def op_cast_ptr_to_weakrefptr(self, obj): + assert self.llinterpreter.typer.getconfig().translation.rweakref return llmemory.cast_ptr_to_weakrefptr(obj) def op_cast_weakrefptr_to_ptr(self, PTRTYPE, obj): + assert self.llinterpreter.typer.getconfig().translation.rweakref return llmemory.cast_weakrefptr_to_ptr(PTRTYPE, obj) op_cast_weakrefptr_to_ptr.need_result_type = True diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -735,12 +735,21 @@ @typer_for(llmemory.weakref_create) @typer_for(weakref.ref) def rtype_weakref_create(hop): - vlist = hop.inputargs(hop.args_r[0]) + from rpython.rtyper.rweakref import BaseWeakRefRepr + + v_inst, = hop.inputargs(hop.args_r[0]) hop.exception_cannot_occur() - return hop.genop('weakref_create', vlist, resulttype=llmemory.WeakRefPtr) + if isinstance(hop.r_result, BaseWeakRefRepr): + return 
hop.r_result._weakref_create(hop, v_inst) + else: + # low-level + assert hop.rtyper.getconfig().translation.rweakref + return hop.genop('weakref_create', [v_inst], + resulttype=llmemory.WeakRefPtr) @typer_for(llmemory.weakref_deref) def rtype_weakref_deref(hop): + assert hop.rtyper.getconfig().translation.rweakref c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) assert v_wref.concretetype == llmemory.WeakRefPtr hop.exception_cannot_occur() @@ -748,6 +757,7 @@ @typer_for(llmemory.cast_ptr_to_weakrefptr) def rtype_cast_ptr_to_weakrefptr(hop): + assert hop.rtyper.getconfig().translation.rweakref vlist = hop.inputargs(hop.args_r[0]) hop.exception_cannot_occur() return hop.genop('cast_ptr_to_weakrefptr', vlist, @@ -755,6 +765,7 @@ @typer_for(llmemory.cast_weakrefptr_to_ptr) def rtype_cast_weakrefptr_to_ptr(hop): + assert hop.rtyper.getconfig().translation.rweakref c_ptrtype, v_wref = hop.inputargs(lltype.Void, hop.args_r[1]) assert v_wref.concretetype == llmemory.WeakRefPtr hop.exception_cannot_occur() diff --git a/rpython/rtyper/rweakref.py b/rpython/rtyper/rweakref.py --- a/rpython/rtyper/rweakref.py +++ b/rpython/rtyper/rweakref.py @@ -11,25 +11,22 @@ class __extend__(annmodel.SomeWeakRef): def rtyper_makerepr(self, rtyper): - return WeakRefRepr(rtyper) + if rtyper.getconfig().translation.rweakref: + return WeakRefRepr(rtyper) + else: + return EmulatedWeakRefRepr(rtyper) def rtyper_makekey(self): return self.__class__, -class WeakRefRepr(Repr): - lowleveltype = llmemory.WeakRefPtr - dead_wref = llmemory.dead_wref - null_wref = lltype.nullptr(llmemory.WeakRef) +class BaseWeakRefRepr(Repr): def __init__(self, rtyper): self.rtyper = rtyper - if not rtyper.getconfig().translation.rweakref: - raise TyperError("RPython-level weakrefs are not supported by " - "this backend or GC policy") def convert_const(self, value): if value is None: - return self.null_wref + return lltype.nullptr(self.lowleveltype.TO) assert isinstance(value, weakref.ReferenceType) instance 
= value() @@ -39,8 +36,7 @@ else: repr = self.rtyper.bindingrepr(Constant(instance)) llinstance = repr.convert_const(instance) - return self._weakref_create(llinstance) - + return self.do_weakref_create(llinstance) def rtype_simple_call(self, hop): v_wref, = hop.inputargs(self) @@ -48,8 +44,53 @@ if hop.r_result.lowleveltype is lltype.Void: # known-to-be-dead weakref return hop.inputconst(lltype.Void, None) else: - return hop.genop('weakref_deref', [v_wref], - resulttype=hop.r_result) + assert v_wref.concretetype == self.lowleveltype + return self._weakref_deref(hop, v_wref) - def _weakref_create(self, llinstance): + +class WeakRefRepr(BaseWeakRefRepr): + lowleveltype = llmemory.WeakRefPtr + dead_wref = llmemory.dead_wref + + def do_weakref_create(self, llinstance): return llmemory.weakref_create(llinstance) + + def _weakref_create(self, hop, v_inst): + return hop.genop('weakref_create', [v_inst], + resulttype=llmemory.WeakRefPtr) + + def _weakref_deref(self, hop, v_wref): + return hop.genop('weakref_deref', [v_wref], + resulttype=hop.r_result) + + +class EmulatedWeakRefRepr(BaseWeakRefRepr): + """For the case rweakref=False, we emulate RPython-level weakrefs + with regular strong references (but not low-level weakrefs). 
+ """ + lowleveltype = lltype.Ptr(lltype.GcStruct('EmulatedWeakRef', + ('ref', llmemory.GCREF))) + dead_wref = lltype.malloc(lowleveltype.TO, immortal=True, zero=True) + + def do_weakref_create(self, llinstance): + p = lltype.malloc(self.lowleveltype.TO, immortal=True) + p.ref = lltype.cast_opaque_ptr(llmemory.GCREF, llinstance) + return p + + def _weakref_create(self, hop, v_inst): + c_type = hop.inputconst(lltype.Void, self.lowleveltype.TO) + c_flags = hop.inputconst(lltype.Void, {'flavor': 'gc'}) + v_ptr = hop.genop('malloc', [c_type, c_flags], + resulttype=self.lowleveltype) + v_gcref = hop.genop('cast_opaque_ptr', [v_inst], + resulttype=llmemory.GCREF) + c_ref = hop.inputconst(lltype.Void, 'ref') + hop.genop('setfield', [v_ptr, c_ref, v_gcref]) + return v_ptr + + def _weakref_deref(self, hop, v_wref): + c_ref = hop.inputconst(lltype.Void, 'ref') + v_gcref = hop.genop('getfield', [v_wref, c_ref], + resulttype=llmemory.GCREF) + return hop.genop('cast_opaque_ptr', [v_gcref], + resulttype=hop.r_result) diff --git a/rpython/rtyper/test/test_rweakref.py b/rpython/rtyper/test/test_rweakref.py --- a/rpython/rtyper/test/test_rweakref.py +++ b/rpython/rtyper/test/test_rweakref.py @@ -138,3 +138,22 @@ res = self.interpret(f, []) assert res == lltype.nullptr(S) + + +class TestRWeakrefDisabled(BaseRtypingTest): + def test_no_real_weakref(self): + class A: + pass + a1 = A() + mylist = [weakref.ref(a1), None] + def g(): + a2 = A() + return weakref.ref(a2) + def fn(i): + w = g() + rgc.collect() + assert w() is not None + return mylist[i] is None + + assert self.interpret(fn, [0], rweakref=False) is False + assert self.interpret(fn, [1], rweakref=False) is True From pypy.commits at gmail.com Mon Apr 25 06:55:58 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 25 Apr 2016 03:55:58 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: Fix for (at least some) ztranslation tests Message-ID: <571df7be.4ca51c0a.70cd1.ffffdb59@mx.google.com> Author: Armin Rigo 
Branch: remove-objspace-options Changeset: r83857:993d83f6db5a Date: 2016-04-25 12:55 +0200 http://bitbucket.org/pypy/pypy/changeset/993d83f6db5a/ Log: Fix for (at least some) ztranslation tests diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -186,6 +186,14 @@ class Cache: def __init__(self, space): from pypy.interpreter.typedef import _getusercls + + if hasattr(space, 'is_fake_objspace'): + # hack: with the fake objspace, we don't want to see typedef's + # _getusercls() at all + self.cls_without_del = W_InstanceObject + self.cls_with_del = W_InstanceObject + return + self.cls_without_del = _getusercls( space.config, W_InstanceObject, False, reallywantdict=True) self.cls_with_del = _getusercls( diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -120,6 +120,8 @@ 'bytearray', 'buffer', 'set', 'frozenset'] class FakeObjSpace(ObjSpace): + is_fake_objspace = True + def __init__(self, config=None): self._seen_extras = [] ObjSpace.__init__(self, config=config) From pypy.commits at gmail.com Mon Apr 25 06:55:56 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 25 Apr 2016 03:55:56 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: Hack: the show() method on Blocks and Links now returns the Message-ID: <571df7bc.455ec20a.5e1f0.6b9a@mx.google.com> Author: Armin Rigo Branch: remove-objspace-options Changeset: r83856:990299ffaac6 Date: 2016-04-25 12:48 +0200 http://bitbucket.org/pypy/pypy/changeset/990299ffaac6/ Log: Hack: the show() method on Blocks and Links now returns the FunctionGraph object, too, which is useful in pdb diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -156,7 +156,7 @@ def show(self): from 
rpython.translator.tool.graphpage import try_show - try_show(self) + return try_show(self) view = show @@ -239,7 +239,7 @@ def show(self): from rpython.translator.tool.graphpage import try_show - try_show(self) + return try_show(self) def _slowly_get_graph(self): import gc diff --git a/rpython/translator/tool/graphpage.py b/rpython/translator/tool/graphpage.py --- a/rpython/translator/tool/graphpage.py +++ b/rpython/translator/tool/graphpage.py @@ -405,13 +405,14 @@ def try_show(obj): if isinstance(obj, FunctionGraph): obj.show() + return obj elif isinstance(obj, Link): - try_show(obj.prevblock) + return try_show(obj.prevblock) elif isinstance(obj, Block): graph = obj._slowly_get_graph() if isinstance(graph, FunctionGraph): graph.show() - return + return graph graph = IncompleteGraph(graph) SingleGraphPage(graph).display() else: From pypy.commits at gmail.com Mon Apr 25 07:11:50 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 25 Apr 2016 04:11:50 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: missing import Message-ID: <571dfb76.021b1c0a.e5785.ffffd3e8@mx.google.com> Author: Armin Rigo Branch: remove-objspace-options Changeset: r83858:1c5dd97d9f8a Date: 2016-04-25 13:11 +0200 http://bitbucket.org/pypy/pypy/changeset/1c5dd97d9f8a/ Log: missing import diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -1,3 +1,4 @@ +import py from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef From pypy.commits at gmail.com Mon Apr 25 07:16:43 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 25 Apr 2016 04:16:43 -0700 (PDT) Subject: [pypy-commit] pypy default: Merged in vasanthaganeshk/pypy (pull request #433) Message-ID: <571dfc9b.a60ac20a.48b0a.ffff9662@mx.google.com> Author: Maciej Fijalkowski Branch: Changeset: 
r83861:af11a6e4a5a0 Date: 2016-04-25 13:15 +0200 http://bitbucket.org/pypy/pypy/changeset/af11a6e4a5a0/ Log: Merged in vasanthaganeshk/pypy (pull request #433) changed spelling of Fedora, changed yum to dnf diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -108,9 +108,9 @@ On Fedora:: - yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ - lib-sqlite3-devel ncurses-devel expat-devel openssl-devel - (XXX plus the Febora version of libgdbm-dev and tk-dev) + dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ + lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \ + gdbm-devel For the optional lzma module on PyPy3 you will also need ``xz-devel``. From pypy.commits at gmail.com Mon Apr 25 07:16:46 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 25 Apr 2016 04:16:46 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: Class name changed here Message-ID: <571dfc9e.82b71c0a.9a7a3.ffffd200@mx.google.com> Author: Armin Rigo Branch: remove-objspace-options Changeset: r83862:c4dfc356046d Date: 2016-04-25 13:16 +0200 http://bitbucket.org/pypy/pypy/changeset/c4dfc356046d/ Log: Class name changed here diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -25,9 +25,9 @@ i61 = int_add(i58, 1) setfield_gc(p18, i61, descr=) guard_not_invalidated(descr=...) - p65 = getfield_gc_r(p14, descr=) + p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) - p66 = getfield_gc_r(p14, descr=) + p66 = getfield_gc_r(p14, descr=) guard_nonnull_class(p66, ..., descr=...) 
p67 = force_token() setfield_gc(p0, p67, descr=) From pypy.commits at gmail.com Mon Apr 25 07:17:14 2016 From: pypy.commits at gmail.com (vasanthaganeshk) Date: Mon, 25 Apr 2016 04:17:14 -0700 (PDT) Subject: [pypy-commit] pypy default: changed spelling of Fedora, changed yum to dnf Message-ID: <571dfcba.c711c30a.5a7f.7fc6@mx.google.com> Author: Vasantha Ganesh K Branch: Changeset: r83859:12b3a5747ca0 Date: 2016-04-25 10:41 +0530 http://bitbucket.org/pypy/pypy/changeset/12b3a5747ca0/ Log: changed spelling of Fedora, changed yum to dnf diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -108,9 +108,9 @@ On Fedora:: - yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ + dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel - (XXX plus the Febora version of libgdbm-dev and tk-dev) + (XXX plus the Fedora version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. 
From pypy.commits at gmail.com Mon Apr 25 07:17:15 2016 From: pypy.commits at gmail.com (vasanthaganeshk) Date: Mon, 25 Apr 2016 04:17:15 -0700 (PDT) Subject: [pypy-commit] pypy default: added Debian equivalent packages for Fedora Message-ID: <571dfcbb.cbb81c0a.5133.ffffdf21@mx.google.com> Author: Vasantha Ganesh K Branch: Changeset: r83860:3ef8e389cbd7 Date: 2016-04-25 16:42 +0530 http://bitbucket.org/pypy/pypy/changeset/3ef8e389cbd7/ Log: added Debian equivalent packages for Fedora diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -109,8 +109,8 @@ On Fedora:: dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ - lib-sqlite3-devel ncurses-devel expat-devel openssl-devel - (XXX plus the Fedora version of libgdbm-dev and tk-dev) + lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \ + gdbm-devel For the optional lzma module on PyPy3 you will also need ``xz-devel``. From pypy.commits at gmail.com Mon Apr 25 08:08:16 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Apr 2016 05:08:16 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: fixed up the test that integrates the whole jitlog on resoperations Message-ID: <571e08b0.08851c0a.eb894.ffffea8f@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83863:ba725a955100 Date: 2016-04-25 14:07 +0200 http://bitbucket.org/pypy/pypy/changeset/ba725a955100/ Log: fixed up the test that integrates the whole jitlog on resoperations diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py --- a/rpython/rlib/jitlog.py +++ b/rpython/rlib/jitlog.py @@ -97,7 +97,7 @@ last_prefix = compressor.get_last_written(i) cp = compressor.compress(i, str_value) if cp is None: - return chr(0xff) + encode_str(str_value) + return b'\xff' + encode_str(str_value) else: cp_len = len(cp) @@ -107,15 +107,15 @@ else: compressor.write(log, i, cp) if len(str_value) == len(cp): - return "\xef" - return chr(i) + 
encode_str(str_value[len(cp):]) + return b'\xef' + return b'\x00' + encode_str(str_value[len(cp):]) class IntValue(WrappedValue): def __init__(self, sem_type, gen_type, value): self.value = value def encode(self, log, i, prefixes): - return encode_le_64bit(self.value) + return b'\x00' + encode_le_64bit(self.value) # note that a ... # "semantic_type" is an integer denoting which meaning does a type at a merge point have @@ -147,31 +147,46 @@ JITLOG_VERSION = 1 JITLOG_VERSION_16BIT_LE = struct.pack(" Author: Armin Rigo Branch: remove-objspace-options Changeset: r83864:a350fbd4215a Date: 2016-04-25 15:08 +0200 http://bitbucket.org/pypy/pypy/changeset/a350fbd4215a/ Log: Fix the assert: * it should check the effectinfo we got, and not just the extraeffect we sent to effectinfo_from_writeanalyze(); * it should also crash if EF_RANDOM_EFFECTS. diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -301,7 +301,8 @@ # assert effectinfo is not None if elidable or loopinvariant: - assert extraeffect != EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE + assert (effectinfo.extraeffect < + EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE) # XXX this should also say assert not can_invalidate, but # it can't because our analyzer is not good enough for now # (and getexecutioncontext() can't really invalidate) From pypy.commits at gmail.com Mon Apr 25 09:22:27 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Apr 2016 06:22:27 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: renamed old module Message-ID: <571e1a13.8bd31c0a.5f55a.16cc@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83865:b20596712e8d Date: 2016-04-25 15:10 +0200 http://bitbucket.org/pypy/pypy/changeset/b20596712e8d/ Log: renamed old module diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ 
b/rpython/rlib/rvmprof/rvmprof.py @@ -124,11 +124,11 @@ def enable_jitlog(self, fileno): # initialize the jit log - from rpython.jit.metainterp import jitlog + from rpython.rlib import jitlog as jl p_error = self.cintf.jitlog_init(fileno) if p_error: raise VMProfError(rffi.charp2str(p_error)) - blob = jitlog.assemble_header() + blob = jl.assemble_header() self.cintf.jitlog_write_marked(jitlog.MARK_JITLOG_HEADER, blob, len(blob)) def disable(self): From pypy.commits at gmail.com Mon Apr 25 09:22:29 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Apr 2016 06:22:29 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: fixed up tests Message-ID: <571e1a15.c9921c0a.f41ad.0bff@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83866:2f286840ea99 Date: 2016-04-25 15:21 +0200 http://bitbucket.org/pypy/pypy/changeset/2f286840ea99/ Log: fixed up tests diff --git a/rpython/jit/backend/x86/test/test_jitlog.py b/rpython/jit/backend/x86/test/test_jitlog.py --- a/rpython/jit/backend/x86/test/test_jitlog.py +++ b/rpython/jit/backend/x86/test/test_jitlog.py @@ -4,8 +4,8 @@ from rpython.jit.tool.oparser import pure_parse from rpython.jit.metainterp import logger from rpython.jit.metainterp.typesystem import llhelper -from rpython.jit.metainterp.jitlog import JITLOG_VERSION_16BIT_LE -from rpython.jit.metainterp import jitlog +from rpython.rlib.jitlog import JITLOG_VERSION_16BIT_LE +from rpython.rlib import jitlog as jl from StringIO import StringIO from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.metainterp.history import AbstractDescr, JitCellToken, BasicFailDescr, BasicFinalDescr @@ -30,7 +30,7 @@ assert os.path.exists(file.strpath) with file.open('rb') as f: # check the file header - assert f.read(3) == '\x23' + JITLOG_VERSION_16BIT_LE + assert f.read(3) == chr(jl.MARK_JITLOG_HEADER) + JITLOG_VERSION_16BIT_LE assert len(f.read()) > 0 def test_env(self, monkeypatch, tmpdir): @@ -41,31 +41,31 @@ assert 
os.path.exists(file.strpath) with file.open('rb') as fd: # check the file header - assert fd.read(3) == '\x23' + JITLOG_VERSION_16BIT_LE + assert fd.read(3) == chr(jl.MARK_JITLOG_HEADER) + JITLOG_VERSION_16BIT_LE assert len(fd.read()) > 0 def test_version(self, monkeypatch, tmpdir): file = tmpdir.join('jitlog') - monkeypatch.setattr(jitlog, 'JITLOG_VERSION_16BIT_LE', '\xff\xfe') + monkeypatch.setattr(jl, 'JITLOG_VERSION_16BIT_LE', '\xff\xfe') monkeypatch.setenv("JITLOG", file.strpath) f = self.run_sample_loop(None) self.meta_interp(f, [10,0]) assert os.path.exists(file.strpath) with file.open('rb') as fd: # check the file header - assert fd.read(3) == '\x23\xff\xfe' + assert fd.read(3) == chr(jl.MARK_JITLOG_HEADER) + '\xff\xfe' assert len(fd.read()) > 0 def test_version(self, monkeypatch, tmpdir): file = tmpdir.join('jitlog') - monkeypatch.setattr(jitlog, 'JITLOG_VERSION_16BIT_LE', '\xff\xfe') + monkeypatch.setattr(jl, 'JITLOG_VERSION_16BIT_LE', '\xff\xfe') monkeypatch.setenv("JITLOG", file.strpath) f = self.run_sample_loop(None) self.meta_interp(f, [10,0]) assert os.path.exists(file.strpath) with file.open('rb') as fd: # check the file header - assert fd.read(3) == '\x23\xff\xfe' + assert fd.read(3) == chr(jl.MARK_JITLOG_HEADER) + '\xff\xfe' assert len(fd.read()) > 0 def run_sample_loop(self, func, myjitdriver = None): diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -129,7 +129,7 @@ if p_error: raise VMProfError(rffi.charp2str(p_error)) blob = jl.assemble_header() - self.cintf.jitlog_write_marked(jitlog.MARK_JITLOG_HEADER, blob, len(blob)) + self.cintf.jitlog_write_marked(jl.MARK_JITLOG_HEADER, blob, len(blob)) def disable(self): """Disable vmprof. 
diff --git a/rpython/rlib/test/test_jitlog.py b/rpython/rlib/test/test_jitlog.py --- a/rpython/rlib/test/test_jitlog.py +++ b/rpython/rlib/test/test_jitlog.py @@ -78,12 +78,12 @@ # result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','hello')]) assert result == b"\xef" - assert fakelog.values == ["\x25\x00\x05\x00\x00\x00hello"] + assert fakelog.values == [chr(jl.MARK_COMMON_PREFIX) + "\x00\x05\x00\x00\x00hello"] # fakelog.values = [] result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','heiter')]) assert result == b"\x00\x04\x00\x00\x00iter" - assert fakelog.values == ["\x25\x00\x02\x00\x00\x00he"] + assert fakelog.values == [chr(jl.MARK_COMMON_PREFIX) + "\x00\x02\x00\x00\x00he"] # fakelog.values = [] result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','heute')]) @@ -98,7 +98,7 @@ fakelog.values = [] result = jl.encode_merge_point(fakelog, compressor, [jl.StringValue(0x0,'s','welle')]) assert result == b"\x00\x02\x00\x00\x00le" - assert fakelog.values == ["\x25\x00\x03\x00\x00\x00wel"] + assert fakelog.values == [chr(jl.MARK_COMMON_PREFIX) + "\x00\x03\x00\x00\x00wel"] def test_common_prefix_func(self): assert jl.commonprefix("","") == "" From pypy.commits at gmail.com Mon Apr 25 09:31:15 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Apr 2016 06:31:15 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: creating jit logger for base optimization tests Message-ID: <571e1c23.cbb81c0a.5133.18d1@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83867:c186763aab26 Date: 2016-04-25 15:30 +0200 http://bitbucket.org/pypy/pypy/changeset/c186763aab26/ Log: creating jit logger for base optimization tests diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -21,6 +21,7 @@ from 
rpython.jit.metainterp.resoperation import (rop, ResOperation, InputArgRef, AbstractValue, OpHelpers) from rpython.jit.metainterp.optimizeopt.util import args_dict +from rpython.rlib import jitlog as jl def test_sort_descrs(): @@ -449,6 +450,7 @@ self.options = Fake() self.globaldata = Fake() self.config = get_combined_translation_config(translating=True) + self.jitlog = jl.VMProfJitLogger() class logger_noopt: @classmethod From pypy.commits at gmail.com Mon Apr 25 09:55:23 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Apr 2016 06:55:23 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: removed old files, adapted main pypy jit driver Message-ID: <571e21cb.91d31c0a.115db.1fd5@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83868:dcee45542016 Date: 2016-04-25 15:54 +0200 http://bitbucket.org/pypy/pypy/changeset/dcee45542016/ Log: removed old files, adapted main pypy jit driver diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -41,6 +41,8 @@ from rpython.rlib import rvmprof return rvmprof.get_unique_id(bytecode) + at jl.returns(jl.MP_FILENAME, jl.MP_LINENO, + jl.MP_SCOPE, jl.MP_INDEX, jl.MP_OPCODE) def get_location(next_instr, is_being_profiled, bytecode): from pypy.tool.stdlib_opcode import opcode_method_names opname = opcode_method_names[ord(bytecode.co_code[next_instr])] @@ -49,8 +51,8 @@ name = bytecode.co_name if not name: name = "" - return "shshs", [bytecode.co_filename, bytecode.co_firstlineno, - name, intmask(next_instr), opname] + return (bytecode.co_filename, bytecode.co_firstlineno, + name, intmask(next_instr), opname) def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py deleted file mode 100644 diff --git a/rpython/jit/metainterp/test/test_compile.py 
b/rpython/jit/metainterp/test/test_compile.py --- a/rpython/jit/metainterp/test/test_compile.py +++ b/rpython/jit/metainterp/test/test_compile.py @@ -4,6 +4,7 @@ from rpython.jit.metainterp.compile import compile_loop from rpython.jit.metainterp.compile import compile_tmp_callback from rpython.jit.metainterp import jitexc +from rpython.rlib import jitlog as jl from rpython.jit.metainterp import jitprof, typesystem, compile from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin from rpython.jit.tool.oparser import parse, convert_loop_to_trace @@ -58,6 +59,7 @@ logger_noopt = FakeLogger() logger_ops = FakeLogger() config = get_combined_translation_config(translating=True) + jitlog = jl.VMProfJitLogger() stats = Stats(None) profiler = jitprof.EmptyProfiler() diff --git a/rpython/jit/metainterp/test/test_jitlog.py b/rpython/jit/metainterp/test/test_jitlog.py deleted file mode 100644 --- a/rpython/jit/metainterp/test/test_jitlog.py +++ /dev/null @@ -1,49 +0,0 @@ -from rpython.jit.tool.oparser import pure_parse -from rpython.jit.metainterp import jitlog -from rpython.jit.metainterp.jitlog import (encode_str, encode_le_16bit, - encode_le_64bit) -from rpython.jit.metainterp.optimizeopt.util import equaloplists -from rpython.jit.metainterp.resoperation import ResOperation, rop -from rpython.jit.backend.model import AbstractCPU -from rpython.jit.metainterp.history import ConstInt, ConstPtr -import tempfile - -class TestLogger(object): - - def make_metainterp_sd(self): - class FakeJitDriver(object): - class warmstate(object): - @staticmethod - def get_location(greenkey_list): - assert len(greenkey_list) == 0 - return '/home/pypy/jit.py', 0, 'enclosed', 99, 'DEL' - - class FakeMetaInterpSd: - cpu = AbstractCPU() - cpu.ts = None - jitdrivers_sd = [FakeJitDriver()] - def get_name_from_address(self, addr): - return 'Name' - return FakeMetaInterpSd() - - def test_debug_merge_point(self, tmpdir): - logger = jitlog.VMProfJitLogger() - file = 
tmpdir.join('binary_file') - file.ensure() - fd = file.open('wb') - logger.cintf.jitlog_init(fd.fileno()) - log_trace = logger.log_trace(0, self.make_metainterp_sd(), None) - op = ResOperation(rop.DEBUG_MERGE_POINT, [ConstInt(0), ConstInt(0), ConstInt(0)]) - log_trace.write([], [op]) - #the next line will close 'fd' - fd.close() - logger.finish() - binary = file.read() - assert binary.startswith(b'\x00\x04\x00\x00\x00loop') - assert binary.endswith(b'\x24' + \ - encode_str('/home/pypy/jit.py') + \ - encode_le_16bit(0) + \ - encode_str('enclosed') + \ - encode_le_64bit(99) + \ - encode_str('DEL')) - diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -567,7 +567,7 @@ # annhelper = MixLevelHelperAnnotator(self.translator.rtyper) for jd in self.jitdrivers_sd: - jd._printable_loc_ptr = self._make_hook_graph(jd, + jd._get_printable_location_ptr = self._make_hook_graph(jd, annhelper, jd.jitdriver.get_printable_location, annmodel.SomeString()) jd._get_unique_id_ptr = self._make_hook_graph(jd, diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -680,7 +680,7 @@ else: drivername = '' # get_location returns - get_location_ptr = self.jitdriver_sd._get_location_ptr + get_location_ptr = getattr(self.jitdriver_sd, '_get_location_ptr', None) if get_location_ptr is not None: types = self.jitdriver_sd._get_loc_types unwrap_greenkey = self.make_unwrap_greenkey() @@ -706,7 +706,7 @@ self.get_location_types = None self.get_location = None # - printable_loc_ptr = self.jitdriver_sd._printable_loc_ptr + printable_loc_ptr = self.jitdriver_sd._get_printable_location_ptr if printable_loc_ptr is None: missing = '(%s: no get_printable_location)' % drivername def get_location_str(greenkey): From pypy.commits at gmail.com Mon Apr 25 10:18:20 2016 From: 
pypy.commits at gmail.com (cfbolz) Date: Mon, 25 Apr 2016 07:18:20 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: (cfbolz, arigo advising) test the cutoff and make it less random Message-ID: <571e272c.4e981c0a.7ecf4.2ab9@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83869:f77dd6069673 Date: 2016-04-25 17:16 +0300 http://bitbucket.org/pypy/pypy/changeset/f77dd6069673/ Log: (cfbolz, arigo advising) test the cutoff and make it less random diff --git a/rpython/translator/backendopt/test/test_writeanalyze.py b/rpython/translator/backendopt/test/test_writeanalyze.py --- a/rpython/translator/backendopt/test/test_writeanalyze.py +++ b/rpython/translator/backendopt/test/test_writeanalyze.py @@ -313,6 +313,38 @@ assert name2.endswith("x") assert T1 == T2 + def test_cutoff(self): + from rpython.rlib.unroll import unrolling_iterable + cutoff = 20 + attrs = unrolling_iterable(["s%s" % i for i in range(cutoff + 5)]) + + class A(object): + def __init__(self, y): + for attr in attrs: + setattr(self, attr, y) + def f(self): + self.x = 1 + res = 0 + for attr in attrs: + res += getattr(self, attr) + return res + + def h(flag): + obj = A(flag) + return obj.f() + + t, wa = self.translate(h, [int]) + wa.cutoff = cutoff + hgraph = graphof(t, h) + op_call_f = hgraph.startblock.operations[-1] + + # check that we fished the expected ops + assert op_call_f.opname == "direct_call" + assert op_call_f.args[0].value._obj._name == 'A.f' + + result = wa.analyze(op_call_f) + assert result is top_set + def test_contains(self): def g(x, y, z): l = [x] diff --git a/rpython/translator/backendopt/writeanalyze.py b/rpython/translator/backendopt/writeanalyze.py --- a/rpython/translator/backendopt/writeanalyze.py +++ b/rpython/translator/backendopt/writeanalyze.py @@ -7,6 +7,8 @@ CUTOFF = 1000 class WriteAnalyzer(graphanalyze.GraphAnalyzer): + cutoff = CUTOFF + def bottom_result(self): return empty_set @@ -22,9 +24,9 @@ def add_to_result(self, 
result, other): if other is top_set: return top_set - if len(other) + len(result) > CUTOFF: + result.update(other) + if len(result) > self.cutoff: return top_set - result.update(other) return result def finalize_builder(self, result): From pypy.commits at gmail.com Mon Apr 25 10:18:22 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 25 Apr 2016 07:18:22 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: (cfbolz, arigo advising) somewhat randomly raise the cutoff to 3000 to make Message-ID: <571e272e.508e1c0a.3593.179a@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83870:e2eba8fa9b9e Date: 2016-04-25 17:17 +0300 http://bitbucket.org/pypy/pypy/changeset/e2eba8fa9b9e/ Log: (cfbolz, arigo advising) somewhat randomly raise the cutoff to 3000 to make things work with pypy, allworkingmodules and mapdict diff --git a/rpython/translator/backendopt/writeanalyze.py b/rpython/translator/backendopt/writeanalyze.py --- a/rpython/translator/backendopt/writeanalyze.py +++ b/rpython/translator/backendopt/writeanalyze.py @@ -4,7 +4,7 @@ top_set = object() empty_set = frozenset() -CUTOFF = 1000 +CUTOFF = 3000 class WriteAnalyzer(graphanalyze.GraphAnalyzer): cutoff = CUTOFF From pypy.commits at gmail.com Mon Apr 25 10:49:00 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 25 Apr 2016 07:49:00 -0700 (PDT) Subject: [pypy-commit] pypy vendor/stdlib-3.5.1: Branch for importing the 3.5.1 stdlib from CPython Message-ID: <571e2e5c.de361c0a.ab75f.3850@mx.google.com> Author: Ronan Lamy Branch: vendor/stdlib-3.5.1 Changeset: r83871:0222f0f390eb Date: 2016-04-24 20:35 +0100 http://bitbucket.org/pypy/pypy/changeset/0222f0f390eb/ Log: Branch for importing the 3.5.1 stdlib from CPython From pypy.commits at gmail.com Mon Apr 25 10:51:02 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 25 Apr 2016 07:51:02 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Open branch for 3.5 Message-ID: 
<571e2ed6.c42e1c0a.21369.ffffa22b@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r83873:1456861b1ea6 Date: 2016-04-24 21:05 +0100 http://bitbucket.org/pypy/pypy/changeset/1456861b1ea6/ Log: Open branch for 3.5 From pypy.commits at gmail.com Mon Apr 25 14:10:19 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 25 Apr 2016 11:10:19 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: even more different (comes from a different module now) Message-ID: <571e5d8b.52ad1c0a.c220c.7fe0@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83874:af9fedfce658 Date: 2016-04-25 21:09 +0300 http://bitbucket.org/pypy/pypy/changeset/af9fedfce658/ Log: even more different (comes from a different module now) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -25,9 +25,9 @@ i61 = int_add(i58, 1) setfield_gc(p18, i61, descr=) guard_not_invalidated(descr=...) - p65 = getfield_gc_r(p14, descr=) + p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) - p66 = getfield_gc_r(p14, descr=) + p66 = getfield_gc_r(p14, descr=) guard_nonnull_class(p66, ..., descr=...) 
p67 = force_token() setfield_gc(p0, p67, descr=) From pypy.commits at gmail.com Mon Apr 25 14:18:30 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Apr 2016 11:18:30 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: try to summarize IRC conversation Message-ID: <571e5f76.c653c20a.695e7.244d@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83875:88c3748b0de0 Date: 2016-04-22 17:12 +0300 http://bitbucket.org/pypy/pypy/changeset/88c3748b0de0/ Log: try to summarize IRC conversation diff --git a/pypy/module/cpyext/bytearrayobject.py b/pypy/module/cpyext/bytearrayobject.py --- a/pypy/module/cpyext/bytearrayobject.py +++ b/pypy/module/cpyext/bytearrayobject.py @@ -16,9 +16,12 @@ # For the convenience of C programmers, the bytes type is considered # to contain a char pointer, not an unsigned char pointer. -# XXX Since the ob_bytes is mutable, we must reflect the buffer back -# into the W_ByteArray object at each call to from_ref and each call to -# exported functions +# XXX The underlying data array is mutable, cpython gives direct access +# to ob_bytes as a RW pointer to bytes. How can we do this? 
+# One proposal is to make W_Bytearray.data into a nonmovable gc list +# as part of as_pyobj(), and expose data only through PyByteArray_AS_STRING +# Under this strategy ob_bytes could possibly not reflect the current state +# of the object PyByteArrayObjectStruct = lltype.ForwardReference() PyByteArrayObject = lltype.Ptr(PyByteArrayObjectStruct) From pypy.commits at gmail.com Mon Apr 25 14:18:34 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Apr 2016 11:18:34 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: merge default into branch Message-ID: <571e5f7a.89cbc20a.86581.19cd@mx.google.com> Author: mattip Branch: cpyext-for-merge Changeset: r83877:b88a7cbb6b17 Date: 2016-04-25 07:59 +0300 http://bitbucket.org/pypy/pypy/changeset/b88a7cbb6b17/ Log: merge default into branch diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -20,3 +20,4 @@ 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1 +3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1 diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -51,6 +51,8 @@ # if log is not opened, open it now if not _S_log_open: openlog() + if isinstance(message, unicode): + message = str(message) lib.syslog(priority, "%s", message) @builtinify diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -102,7 +102,7 @@ apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ - tk-dev + tk-dev libgc-dev For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst --- a/pypy/doc/introduction.rst +++ b/pypy/doc/introduction.rst @@ -1,16 +1,22 @@ What is PyPy? ============= -In common parlance, PyPy has been used to mean two things. 
The first is the -:ref:`RPython translation toolchain `, which is a framework for generating -dynamic programming language implementations. And the second is one -particular implementation that is so generated -- -an implementation of the Python_ programming language written in -Python itself. It is designed to be flexible and easy to experiment with. +Historically, PyPy has been used to mean two things. The first is the +:ref:`RPython translation toolchain ` for generating +interpreters for dynamic programming languages. And the second is one +particular implementation of Python_ produced with it. Because RPython +uses the same syntax as Python, this generated version became known as +Python interpreter written in Python. It is designed to be flexible and +easy to experiment with. -This double usage has proven to be confusing, and we are trying to move -away from using the word PyPy to mean both things. From now on we will -try to use PyPy to only mean the Python implementation, and say the +To make it more clear, we start with source code written in RPython, +apply the RPython translation toolchain, and end up with PyPy as a +binary executable. This executable is the Python interpreter. + +Double usage has proven to be confusing, so we've moved away from using +the word PyPy to mean both toolchain and generated interpreter. Now we +use word PyPy to refer to the Python implementation, and explicitly +mention :ref:`RPython translation toolchain ` when we mean the framework. Some older documents, presentations, papers and videos will still have the old diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -3,10 +3,17 @@ ======== We have released PyPy 5.1, about a month after PyPy 5.0. -We encourage all users of PyPy to update to this version. 
Apart from the usual -bug fixes, there is an ongoing effort to improve the warmup time and memory -usage of JIT-related metadata, and we now fully support the IBM s390x -architecture. + +This release includes more improvement to warmup time and memory +requirements. We have seen about a 20% memory requirement reduction and up to +30% warmup time improvement, more detail in the `blog post`_. + +We also now have `fully support for the IBM s390x`_. Since this support is in +`RPython`_, any dynamic language written using RPython, like PyPy, will +automagically be supported on that architecture. + +We updated cffi_ to 1.6, and continue to improve support for the wider +python ecosystem using the PyPy interpreter. You can download the PyPy 5.1 release here: @@ -26,6 +33,9 @@ .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html .. _`numpy`: https://bitbucket.org/pypy/numpy +.. _cffi: https://cffi.readthedocs.org +.. _`fully support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html +.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html What is PyPy? ============= @@ -46,7 +56,7 @@ * big- and little-endian variants of **PPC64** running Linux, - * **s960x** running Linux + * **s390x** running Linux .. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. 
_`dynamic languages`: http://pypyjs.org @@ -74,6 +84,8 @@ * Fix a corner case in the JIT * Fix edge cases in the cpyext refcounting-compatible semantics + (more work on cpyext compatibility is coming in the ``cpyext-ext`` + branch, but isn't ready yet) * Try harder to not emit NEON instructions on ARM processors without NEON support @@ -92,11 +104,17 @@ * Fix sandbox startup (a regression in 5.0) + * Fix possible segfault for classes with mangled mro or __metaclass__ + + * Fix isinstance(deque(), Hashable) on the pure python deque + + * Fix an issue with forkpty() + * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy -* Numpy: +* Numpy_: * Implemented numpy.where for a single argument @@ -108,6 +126,8 @@ functions exported from libpypy.so are declared in pypy_numpy.h, which is included only when building our fork of numpy + * Add broadcast + * Performance improvements: * Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting @@ -119,14 +139,18 @@ * Remove the forced minor collection that occurs when rewriting the assembler at the start of the JIT backend + * Port the resource module to cffi + * Internal refactorings: * Use a simpler logger to speed up translation * Drop vestiges of Python 2.5 support in testing + * Update rpython functions with ones needed for py3k + .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html -.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html +.. _Numpy: https://bitbucket.org/pypy/numpy Please update, and continue to help us make PyPy better. diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst --- a/pypy/doc/whatsnew-5.1.0.rst +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -60,3 +60,13 @@ Remove old uneeded numpy headers, what is left is only for testing. Also generate pypy_numpy.h which exposes functions to directly use micronumpy ndarray and ufuncs + +.. 
branch: rposix-for-3 + +Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). +This updates the underlying rpython functions with the ones needed for the +py3k branch + +.. branch: numpy_broadcast + +Add broadcast to micronumpy diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,14 +3,10 @@ ========================= .. this is a revision shortly after release-5.1 -.. startrev: 2180e1eaf6f6 +.. startrev: aa60332382a1 -.. branch: rposix-for-3 +.. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046 -Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). -This updates the underlying rpython functions with the ones needed for the -py3k branch - -.. branch: numpy_broadcast +.. branch: gcheader-decl -Add broadcast to micronumpy +Reduce the size of generated C sources. diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -46,6 +46,7 @@ '_get_types': 'func._get_types', '_get_common_types': 'func._get_common_types', 'from_buffer': 'func.from_buffer', + 'gcp': 'func.gcp', 'string': 'func.string', 'unpack': 'func.unpack', diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -1773,14 +1773,14 @@ def test_introspect_order(self): ffi, lib = self.prepare(""" - union aaa { int a; }; typedef struct ccc { int a; } b; - union g { int a; }; typedef struct cc { int a; } bbb; - union aa { int a; }; typedef struct a { int a; } bb; + union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb; + union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb; + union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb; 
""", "test_introspect_order", """ - union aaa { int a; }; typedef struct ccc { int a; } b; - union g { int a; }; typedef struct cc { int a; } bbb; - union aa { int a; }; typedef struct a { int a; } bb; + union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb; + union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb; + union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb; """) - assert ffi.list_types() == (['b', 'bb', 'bbb'], - ['a', 'cc', 'ccc'], - ['aa', 'aaa', 'g']) + assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'], + ['CFFIa', 'CFFIcc', 'CFFIccc'], + ['CFFIaa', 'CFFIaaa', 'CFFIg']) diff --git a/pypy/module/_cffi_backend/wrapper.py b/pypy/module/_cffi_backend/wrapper.py --- a/pypy/module/_cffi_backend/wrapper.py +++ b/pypy/module/_cffi_backend/wrapper.py @@ -92,7 +92,8 @@ return ctype._call(self.fnptr, args_w) def descr_repr(self, space): - return space.wrap("" % (self.fnname,)) + doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname) + return space.wrap("" % (doc,)) def descr_get_doc(self, space): doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname) diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -180,10 +180,9 @@ def specialized_zip_2_lists(space, w_list1, w_list2): from pypy.objspace.std.listobject import W_ListObject - if (not isinstance(w_list1, W_ListObject) or - not isinstance(w_list2, W_ListObject)): + if type(w_list1) is not W_ListObject or type(w_list2) is not W_ListObject: raise OperationError(space.w_TypeError, - space.wrap("expected two lists")) + space.wrap("expected two exact lists")) if space.config.objspace.std.withspecialisedtuple: intlist1 = w_list1.getitems_int() diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -3,13 +3,17 @@ min=1 
rev=0 branchname=release-$maj.x # ==OR== release-$maj.$min.x -tagname=release-$maj.$min.$rev +tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev + +hg log -r $branchname || exit 1 +hg log -r $tagname || exit 1 + # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. # The script should be run in an empty directory, i.e. /tmp/release_xxx -for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 +for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 s390x do wget http://buildbot.pypy.org/nightly/$branchname/pypy-c-jit-latest-$plat.tar.bz2 tar -xf pypy-c-jit-latest-$plat.tar.bz2 diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -341,6 +341,20 @@ self.prebuilt_root_objects = self.AddressStack() # self._init_writebarrier_logic() + # + # The size of all the objects turned from 'young' to 'old' + # since we started the last major collection cycle. This is + # used to track progress of the incremental GC: normally, we + # run one major GC step after each minor collection, but if a + # lot of objects are made old, we need run two or more steps. + # Otherwise the risk is that we create old objects faster than + # we're collecting them. The 'threshold' is incremented after + # each major GC step at a fixed rate; the idea is that as long + # as 'size_objects_made_old > threshold_objects_made_old' then + # we must do more major GC steps. See major_collection_step() + # for more details. 
+ self.size_objects_made_old = r_uint(0) + self.threshold_objects_made_old = r_uint(0) def setup(self): @@ -464,7 +478,7 @@ self.gc_nursery_debug = True else: self.gc_nursery_debug = False - self.minor_collection() # to empty the nursery + self._minor_collection() # to empty the nursery llarena.arena_free(self.nursery) self.nursery_size = newsize self.allocate_nursery() @@ -509,8 +523,8 @@ self.min_heap_size = max(self.min_heap_size, self.nursery_size * self.major_collection_threshold) # the following two values are usually equal, but during raw mallocs - # of arrays, next_major_collection_threshold is decremented to make - # the next major collection arrive earlier. + # with memory pressure accounting, next_major_collection_threshold + # is decremented to make the next major collection arrive earlier. # See translator/c/test/test_newgc, test_nongc_attached_to_gc self.next_major_collection_initial = self.min_heap_size self.next_major_collection_threshold = self.min_heap_size @@ -700,21 +714,60 @@ def collect(self, gen=2): """Do a minor (gen=0), start a major (gen=1), or do a full major (gen>=2) collection.""" - if gen <= 1: - self.minor_collection() - if gen == 1 or (self.gc_state != STATE_SCANNING and gen != -1): + if gen < 0: + self._minor_collection() # dangerous! no major GC cycle progress + elif gen <= 1: + self.minor_collection_with_major_progress() + if gen == 1 and self.gc_state == STATE_SCANNING: self.major_collection_step() else: self.minor_and_major_collection() self.rrc_invoke_callback() + def minor_collection_with_major_progress(self, extrasize=0): + """Do a minor collection. Then, if there is already a major GC + in progress, run at least one major collection step. If there is + no major GC but the threshold is reached, start a major GC. + """ + self._minor_collection() + + # If the gc_state is STATE_SCANNING, we're not in the middle + # of an incremental major collection. 
In that case, wait + # until there is too much garbage before starting the next + # major collection. But if we are in the middle of an + # incremental major collection, then always do (at least) one + # step now. + # + # Within a major collection cycle, every call to + # major_collection_step() increments + # 'threshold_objects_made_old' by nursery_size/2. + + if self.gc_state != STATE_SCANNING or self.threshold_reached(extrasize): + self.major_collection_step(extrasize) + + # See documentation in major_collection_step() for target invariants + while self.gc_state != STATE_SCANNING: # target (A1) + threshold = self.threshold_objects_made_old + if threshold >= r_uint(extrasize): + threshold -= r_uint(extrasize) # (*) + if self.size_objects_made_old <= threshold: # target (A2) + break + # Note that target (A2) is tweaked by (*); see + # test_gc_set_max_heap_size in translator/c, test_newgc.py + + self._minor_collection() + self.major_collection_step(extrasize) + + self.rrc_invoke_callback() + + def collect_and_reserve(self, totalsize): """To call when nursery_free overflows nursery_top. First check if pinned objects are in front of nursery_top. If so, jump over the pinned object and try again to reserve totalsize. - Otherwise do a minor collection, and possibly a major collection, and - finally reserve totalsize bytes. + Otherwise do a minor collection, and possibly some steps of a + major collection, and finally reserve totalsize bytes. """ minor_collection_count = 0 @@ -757,47 +810,27 @@ self.nursery_top = self.nursery_barriers.popleft() else: minor_collection_count += 1 - self.minor_collection() if minor_collection_count == 1: + self.minor_collection_with_major_progress() + else: + # Nursery too full again. This is likely because of + # execute_finalizers() or rrc_invoke_callback(). + # we need to fix it with another call to minor_collection() + # ---this time only the minor part so that we are sure that + # the nursery is empty (apart from pinned objects). 
# - # If the gc_state is STATE_SCANNING, we're not in - # the middle of an incremental major collection. - # In that case, wait until there is too much - # garbage before starting the next major - # collection. But if we are in the middle of an - # incremental major collection, then always do (at - # least) one step now. + # Note that this still works with the counters: + # 'size_objects_made_old' will be increased by + # the _minor_collection() below. We don't + # immediately restore the target invariant that + # 'size_objects_made_old <= threshold_objects_made_old'. + # But we will do it in the next call to + # minor_collection_with_major_progress(). # - # This will increment next_major_collection_threshold - # by nursery_size//2. If more than nursery_size//2 - # survives, then threshold_reached() might still be - # true after that. In that case we do a second step. - # The goal is to avoid too high memory peaks if the - # program allocates a lot of surviving objects. - # - if (self.gc_state != STATE_SCANNING or - self.threshold_reached()): - - self.major_collection_step() - - if (self.gc_state != STATE_SCANNING and - self.threshold_reached()): # ^^but only if still - self.minor_collection() # the same collection - self.major_collection_step() - # - self.rrc_invoke_callback() - # - # The nursery might not be empty now, because of - # execute_finalizers() or rrc_invoke_callback(). - # If it is almost full again, - # we need to fix it with another call to minor_collection(). - if self.nursery_free + totalsize > self.nursery_top: - self.minor_collection() - # - else: ll_assert(minor_collection_count == 2, - "Seeing minor_collection() at least twice." - "Too many pinned objects?") + "Calling minor_collection() twice is not " + "enough. Too many pinned objects?") + self._minor_collection() # # Tried to do something about nursery_free overflowing # nursery_top before this point. Try to reserve totalsize now. @@ -855,21 +888,9 @@ # to major_collection_step(). 
If there is really no memory, # then when the major collection finishes it will raise # MemoryError. - # - # The logic is to first do a minor GC only, and check if that - # was enough to free a bunch of large young objects. If it - # was, then we don't do any major collection step. - # - while self.threshold_reached(raw_malloc_usage(totalsize)): - self.minor_collection() - if self.threshold_reached(raw_malloc_usage(totalsize) + - self.nursery_size // 2): - self.major_collection_step(raw_malloc_usage(totalsize)) - self.rrc_invoke_callback() - # note that this loop should not be infinite: when the - # last step of a major collection is done but - # threshold_reached(totalsize) is still true, then - # we should get a MemoryError from major_collection_step(). + if self.threshold_reached(raw_malloc_usage(totalsize)): + self.minor_collection_with_major_progress( + raw_malloc_usage(totalsize) + self.nursery_size // 2) # # Check if the object would fit in the ArenaCollection. # Also, an object allocated from ArenaCollection must be old. @@ -1547,7 +1568,7 @@ # ---------- # Nursery collection - def minor_collection(self): + def _minor_collection(self): """Perform a minor collection: find the objects from the nursery that remain alive and move them out.""" # @@ -1718,6 +1739,10 @@ self.old_objects_pointing_to_pinned.foreach( self._reset_flag_old_objects_pointing_to_pinned, None) # + # Accounting: 'nursery_surviving_size' is the size of objects + # from the nursery that we just moved out. + self.size_objects_made_old += r_uint(self.nursery_surviving_size) + # debug_print("minor collect, total memory used:", self.get_total_memory_used()) debug_print("number of pinned objects:", @@ -1958,6 +1983,7 @@ self.header(obj).tid &= ~GCFLAG_HAS_SHADOW # totalsize = size_gc_header + self.get_size(obj) + self.nursery_surviving_size += raw_malloc_usage(totalsize) # # Copy it. Note that references to other objects in the # nursery are kept unchanged in this step. 
@@ -2002,6 +2028,11 @@ return hdr.tid |= GCFLAG_VISITED_RMY # + # Accounting + size_gc_header = self.gcheaderbuilder.size_gc_header + size = size_gc_header + self.get_size(obj) + self.size_objects_made_old += r_uint(raw_malloc_usage(size)) + # # we just made 'obj' old, so we need to add it to the correct lists added_somewhere = False # @@ -2084,14 +2115,14 @@ def gc_step_until(self, state): while self.gc_state != state: - self.minor_collection() + self._minor_collection() self.major_collection_step() debug_gc_step_until = gc_step_until # xxx def debug_gc_step(self, n=1): while n > 0: - self.minor_collection() + self._minor_collection() self.major_collection_step() n -= 1 @@ -2111,37 +2142,44 @@ self.debug_check_consistency() # + # 'threshold_objects_made_old', is used inside comparisons + # with 'size_objects_made_old' to know when we must do + # several major GC steps (i.e. several consecurive calls + # to the present function). Here is the target that + # we try to aim to: either (A1) or (A2) + # + # (A1) gc_state == STATE_SCANNING (i.e. major GC cycle ended) + # (A2) size_objects_made_old <= threshold_objects_made_old + # # Every call to major_collection_step() adds nursery_size//2 - # to the threshold. It is reset at the end of this function - # when the major collection is fully finished. - # + # to 'threshold_objects_made_old'. # In the common case, this is larger than the size of all # objects that survive a minor collection. After a few # minor collections (each followed by one call to # major_collection_step()) the threshold is much higher than - # the currently-in-use old memory. Then threshold_reached() - # won't be true again until the major collection fully - # finishes, time passes, and it's time for the next major - # collection. + # the 'size_objects_made_old', making the target invariant (A2) + # true by a large margin. 
# # However there are less common cases: # - # * if more than half of the nursery consistently survives: we - # call major_collection_step() twice after a minor - # collection; + # * if more than half of the nursery consistently survives: + # then we need two calls to major_collection_step() after + # some minor collection; # # * or if we're allocating a large number of bytes in - # external_malloc(). In that case, we are likely to reach - # again the threshold_reached() case, and more major - # collection steps will be done immediately until - # threshold_reached() returns false. + # external_malloc() and some of them survive the following + # minor collection. In that case, more than two major + # collection steps must be done immediately, until we + # restore the target invariant (A2). # - self.next_major_collection_threshold += self.nursery_size // 2 + self.threshold_objects_made_old += r_uint(self.nursery_size // 2) - # XXX currently very coarse increments, get this working then split - # to smaller increments using stacks for resuming if self.gc_state == STATE_SCANNING: + # starting a major GC cycle: reset these two counters + self.size_objects_made_old = r_uint(0) + self.threshold_objects_made_old = r_uint(self.nursery_size // 2) + self.objects_to_trace = self.AddressStack() self.collect_roots() self.gc_state = STATE_MARKING diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -19,6 +19,8 @@ BaseDirectGCTest.setup_method(self, meth) max = getattr(meth, 'max_number_of_pinned_objects', 20) self.gc.max_number_of_pinned_objects = max + if not hasattr(self.gc, 'minor_collection'): + self.gc.minor_collection = self.gc._minor_collection def test_pin_can_move(self): # even a pinned object is considered to be movable. 
Only the caller diff --git a/rpython/tool/ansi_print.py b/rpython/tool/ansi_print.py --- a/rpython/tool/ansi_print.py +++ b/rpython/tool/ansi_print.py @@ -67,6 +67,8 @@ def dot(self): """Output a mandelbrot dot to the terminal.""" + if not isatty(): + return global wrote_dot if not wrote_dot: mandelbrot_driver.reset() diff --git a/rpython/tool/test/test_ansi_print.py b/rpython/tool/test/test_ansi_print.py --- a/rpython/tool/test/test_ansi_print.py +++ b/rpython/tool/test/test_ansi_print.py @@ -65,6 +65,19 @@ assert output[3] == ('[test:WARNING] maybe?\n', (31,)) assert len(output[4][0]) == 1 # single character +def test_no_tty(): + log = ansi_print.AnsiLogger('test') + with FakeOutput(tty=False) as output: + log.dot() + log.dot() + log.WARNING('oops') + log.WARNING('maybe?') + log.dot() + assert len(output) == 2 + assert output[0] == ('[test:WARNING] oops\n', ()) + assert output[1] == ('[test:WARNING] maybe?\n', ()) + + def test_unknown_method_names(): log = ansi_print.AnsiLogger('test') with FakeOutput() as output: diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -547,7 +547,6 @@ gct = self.db.gctransformer if gct is not None: self.gc_init = gct.gcheader_initdata(self.obj) - db.getcontainernode(self.gc_init) else: self.gc_init = None @@ -678,7 +677,6 @@ gct = self.db.gctransformer if gct is not None: self.gc_init = gct.gcheader_initdata(self.obj) - db.getcontainernode(self.gc_init) else: self.gc_init = None diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -399,7 +399,7 @@ try: configure_boehm(self.translator.platform) except CompilationError, e: - i = 'Boehm GC not installed. Try e.g. "translate.py --gc=hybrid"' + i = 'Boehm GC not installed. Try e.g. 
"translate.py --gc=minimark"' raise Exception(str(e) + '\n' + i) @taskdef([STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE, '?annotate'], From pypy.commits at gmail.com Mon Apr 25 14:18:36 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Apr 2016 11:18:36 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: update TODO Message-ID: <571e5f7c.6a70c20a.745cb.ffffb491@mx.google.com> Author: mattip Branch: cpyext-for-merge Changeset: r83878:dd16152b0b79 Date: 2016-04-25 19:46 +0300 http://bitbucket.org/pypy/pypy/changeset/dd16152b0b79/ Log: update TODO diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -1,5 +1,6 @@ -* finish PySequence_Fast -* typeobject.py and handling of __float__ prevents us from using pypy * python setup.py install in numpy does not somehow tell setuptools it's installed (I bet it's about the py27 tag) -* implement PyFile_AsFile +* reduce size of generated c code from slot definitions in slotdefs. +* fix py_string_as_string_unicode-getstringandsize_unicode which + segfaults when run -A after printing '.', the same test passes cpython -A + and untranslated From pypy.commits at gmail.com Mon Apr 25 14:18:37 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Apr 2016 11:18:37 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: update TODO Message-ID: <571e5f7d.cbb81c0a.5133.ffff8cca@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83879:ac747a3c2ef4 Date: 2016-04-25 19:52 +0300 http://bitbucket.org/pypy/pypy/changeset/ac747a3c2ef4/ Log: update TODO diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -1,5 +1,10 @@ -* finish PySequence_Fast -* typeobject.py and handling of __float__ prevents us from using pypy * python setup.py install in numpy does not somehow tell setuptools it's installed (I bet it's about the py27 tag) -* implement PyFile_AsFile +* reduce size of generated c code from slot definitions in slotdefs. 
+* fix py_string_as_string_unicode-getstringandsize_unicode which + segfaults when run -A after printing '.', the same test passes cpython -A + and untranslated +* export ndarrayobject objects like PyArrayObject, PyArrayDescrObject needed + to coninue using micronumpy as a numpy 1.10 ndarray alternative + This used to be done with pypy-specific headers which replaced upstream's + headers, can be tested by installing matplotlib or aubio (pypy/numpy issue #47) From pypy.commits at gmail.com Mon Apr 25 14:18:32 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Apr 2016 11:18:32 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: prepare cpyext-ext for merging back to default Message-ID: <571e5f78.c30a1c0a.5c087.ffff8aa3@mx.google.com> Author: mattip Branch: cpyext-for-merge Changeset: r83876:4baa1ad93d29 Date: 2016-04-25 00:03 +0300 http://bitbucket.org/pypy/pypy/changeset/4baa1ad93d29/ Log: prepare cpyext-ext for merging back to default From pypy.commits at gmail.com Mon Apr 25 14:18:39 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Apr 2016 11:18:39 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: skip 'wishlist' tests Message-ID: <571e5f7f.4412c30a.595e.218f@mx.google.com> Author: mattip Branch: cpyext-for-merge Changeset: r83880:0a80052c8e88 Date: 2016-04-25 21:16 +0300 http://bitbucket.org/pypy/pypy/changeset/0a80052c8e88/ Log: skip 'wishlist' tests diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -1,4 +1,4 @@ -import py +import pytest from pypy.module.cpyext.test.test_api import BaseApiTest class TestMemoryViewObject(BaseApiTest): @@ -12,5 +12,6 @@ w_bytes = space.call_method(w_view, "tobytes") assert space.unwrap(w_bytes) == "hello" + @pytest.mark.skipif(True, reason='write a test for this') def test_get_base_and_get_buffer(self, space, api): assert False # XXX test 
PyMemoryView_GET_BASE, PyMemoryView_GET_BUFFER diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -1,4 +1,4 @@ -import py +import py, pytest from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase @@ -231,6 +231,7 @@ assert type(x) is int assert x == -424344 + @pytest.mark.skipif(True, reason='realloc not fully implemented') def test_object_realloc(self): module = self.import_extension('foo', [ ("realloctest", "METH_NOARGS", diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -102,7 +102,7 @@ instance = space.call_function(space.w_ValueError) assert api.PyExceptionInstance_Class(instance) is space.w_ValueError - @pytest.mark.skipif("sys.platform == 'win32'") + @pytest.mark.skipif(True, reason='not implemented yet') def test_interrupt_occurred(self, space, api): assert not api.PyOS_InterruptOccurred() import signal, os From pypy.commits at gmail.com Mon Apr 25 16:18:52 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Apr 2016 13:18:52 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: revert 8d781f7a74f7, fix issue #2282 Message-ID: <571e7bac.0c371c0a.93d61.ffffb251@mx.google.com> Author: mattip Branch: release-5.x Changeset: r83882:eccbe359298b Date: 2016-04-25 08:20 +0300 http://bitbucket.org/pypy/pypy/changeset/eccbe359298b/ Log: revert 8d781f7a74f7, fix issue #2282 diff too long, truncating to 2000 out of 2282 lines diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -144,7 +144,7 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir): +def 
copy_header_files(dstdir, copy_numpy_headers): # XXX: 20 lines of code to recursively copy a directory, really?? assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') @@ -152,6 +152,18 @@ headers.append(udir.join(name)) _copy_header_files(headers, dstdir) + if copy_numpy_headers: + try: + dstdir.mkdir('numpy') + except py.error.EEXIST: + pass + numpy_dstdir = dstdir / 'numpy' + + numpy_include_dir = include_dir / 'numpy' + numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') + _copy_header_files(numpy_headers, numpy_dstdir) + + class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -1211,7 +1223,7 @@ setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include) + copy_header_files(trunk_include, use_micronumpy) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h @@ -0,0 +1,10 @@ + + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +#define import_array() +#define PyArray_New _PyArray_New + diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -1,6 +1,8 @@ -/* NDArray object interface - S. H. Muller, 2013/07/26 */ -/* For testing ndarrayobject only */ +/* NDArray object interface - S. H. 
Muller, 2013/07/26 + * It will be copied by numpy/core/setup.py by install_data to + * site-packages/numpy/core/includes/numpy +*/ #ifndef Py_NDARRAYOBJECT_H #define Py_NDARRAYOBJECT_H @@ -8,8 +10,13 @@ extern "C" { #endif +#include "old_defines.h" #include "npy_common.h" -#include "ndarraytypes.h" +#include "__multiarray_api.h" + +#define NPY_UNUSED(x) x +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -17,20 +24,208 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto -#define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define NPY_MAXDIMS 32 -/* functions defined in ndarrayobject.c*/ +#ifndef NDARRAYTYPES_H +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +/* data types copied from numpy/ndarraytypes.h + * keep numbers in sync with micronumpy.interp_dtype.DTypeCache + */ +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. 
+ */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) +#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) +#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) +#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) + + +/* flags */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 +#define NPY_ARRAY_OWNDATA 0x0004 +#define NPY_ARRAY_FORCECAST 0x0010 +#define NPY_ARRAY_ENSURECOPY 0x0020 +#define NPY_ARRAY_ENSUREARRAY 0x0040 +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 +#define NPY_ARRAY_ALIGNED 0x0100 +#define NPY_ARRAY_NOTSWAPPED 0x0200 +#define NPY_ARRAY_WRITEABLE 0x0400 +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY 
(NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +#define NPY_FARRAY NPY_ARRAY_FARRAY +#define NPY_CARRAY NPY_ARRAY_CARRAY + +#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + +#define PyArray_ISONESEGMENT(arr) (1) +#define PyArray_ISNOTSWAPPED(arr) (1) +#define PyArray_ISBYTESWAPPED(arr) (0) + +#endif + +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX32 NPY_CFLOAT +#define NPY_COMPLEX64 NPY_CDOUBLE + + +/* functions */ +#ifndef PyArray_NDIM + +#define PyArray_Check _PyArray_Check +#define PyArray_CheckExact 
_PyArray_CheckExact +#define PyArray_FLAGS _PyArray_FLAGS + +#define PyArray_NDIM _PyArray_NDIM +#define PyArray_DIM _PyArray_DIM +#define PyArray_STRIDE _PyArray_STRIDE +#define PyArray_SIZE _PyArray_SIZE +#define PyArray_ITEMSIZE _PyArray_ITEMSIZE +#define PyArray_NBYTES _PyArray_NBYTES +#define PyArray_TYPE _PyArray_TYPE +#define PyArray_DATA _PyArray_DATA + +#define PyArray_Size PyArray_SIZE +#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) + +#define PyArray_FromAny _PyArray_FromAny +#define PyArray_FromObject _PyArray_FromObject +#define PyArray_ContiguousFromObject PyArray_FromObject +#define PyArray_ContiguousFromAny PyArray_FromObject + +#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) +#define PyArray_FROM_OTF(obj, typenum, requirements) \ + PyArray_FromObject(obj, typenum, 0, 0) + +#define PyArray_New _PyArray_New +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData +#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning + +#define PyArray_EMPTY(nd, dims, type_num, fortran) \ + PyArray_SimpleNew(nd, dims, type_num) PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_CopyInto _PyArray_CopyInto +#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) + +/* Don't use these in loops! 
*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0))) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1))) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1) + \ + (k)*PyArray_STRIDE(obj,2))) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1) + \ + (k)*PyArray_STRIDE(obj,2) + \ + (l)*PyArray_STRIDE(obj,3))) + +#endif #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h --- a/pypy/module/cpyext/include/numpy/ndarraytypes.h +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -1,9 +1,69 @@ #ifndef NDARRAYTYPES_H #define NDARRAYTYPES_H -/* For testing ndarrayobject only */ +#include "numpy/npy_common.h" +//#include "npy_endian.h" +//#include "npy_cpu.h" +//#include "utils.h" -#include "numpy/npy_common.h" +//for pypy - numpy has lots of typedefs +//for pypy - make life easier, less backward support +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION +#undef NPY_1_8_API_VERSION + +#define NPY_ENABLE_SEPARATE_COMPILATION 1 +#define NPY_VISIBILITY_HIDDEN + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN +#else + #define NPY_NO_EXPORT static +#endif + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + + + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. 
+ * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version. This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. 
+ */ +#define NPY_FEATURE_VERSION NPY_API_VERSION enum NPY_TYPES { NPY_BOOL=0, NPY_BYTE, NPY_UBYTE, @@ -31,6 +91,18 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + /* * These characters correspond to the array type and the struct * module @@ -85,6 +157,27 @@ }; typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2 +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_MERGESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0, +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { NPY_NOSCALAR=-1, NPY_BOOL_SCALAR, NPY_INTPOS_SCALAR, @@ -93,6 +186,7 @@ NPY_COMPLEX_SCALAR, NPY_OBJECT_SCALAR } NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) /* For specifying array memory layout or iteration order */ typedef enum { @@ -106,6 +200,729 @@ NPY_KEEPORDER=2 } NPY_ORDER; +/* For specifying allowed casting in operations which support it */ +typedef enum { + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, + + /* + * Temporary internal definition only, will be removed in upcoming + * release, see below + * */ + NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length 
of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) + +typedef enum { + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10,/* nanoseconds */ + NPY_FR_ps = 11,/* picoseconds */ + NPY_FR_fs = 12,/* femtoseconds */ + NPY_FR_as = 13,/* attoseconds */ + NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. */ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. 
*/ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. + ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + + /* + * Macros to define how array, and dimension/strides data is + * allocated. 
+ */ + + /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ + +#define NPY_USE_PYMEM 1 + +#if NPY_USE_PYMEM == 1 +#define PyArray_malloc PyMem_Malloc +#define PyArray_free PyMem_Free +#define PyArray_realloc PyMem_Realloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. 
Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. 
*/ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). 
+ */ + char byteorder; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* element size (itemsize) for this type */ + int elsize; + /* alignment needed for this type */ + int alignment; + /* + * Non-NULL if this type is + * is an array (C-contiguous) + * of some other type + */ + struct _arr_descr *subarray; + /* + * The fields dictionary for this type + * For statically defined descr this + * is always Py_None + */ + PyObject *fields; + /* + * An ordered tuple of field names or NULL + * if no fields are defined + */ + PyObject *names; + /* + * a table of functions specific for each + * basic data descriptor + */ + PyArray_ArrFuncs *f; + /* Metadata about this dtype */ + PyObject *metadata; + /* + * Metadata specific to the C implementation + * of the particular dtype. This was added + * for NumPy 1.7.0. + */ + NpyAuxData *c_metadata; +} PyArray_Descr; + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. 
Except in the + * case of UPDATEIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that shold be + * decref'd on deletion + * + * For UPDATEIFCOPY flag this is an + * array to-be-updated upon deletion + * of this one + */ + PyObject *base; + /* Pointer to type structure */ + PyArray_Descr *descr; + /* Flags describing array -- see below */ + int flags; + /* For weak references */ + PyObject *weakreflist; +} PyArrayObject_fields; + +/* + * To hide the implementation details, we only expose + * the Python struct HEAD. + */ +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +/* + * Can't put this in npy_deprecated_api.h like the others. + * PyArrayObject field access is deprecated as of NumPy 1.7. + */ +typedef PyArrayObject_fields PyArrayObject; +#else +typedef struct tagPyArrayObject { + PyObject_HEAD +} PyArrayObject; +#endif + +#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) + +/* Array Flags Object */ +typedef struct PyArrayFlagsObject { + PyObject_HEAD + PyObject *arr; + int flags; +} PyArrayFlagsObject; + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + +typedef struct { + NPY_DATETIMEUNIT base; + int num; +} PyArray_DatetimeMetaData; + +typedef struct { + NpyAuxData base; + PyArray_DatetimeMetaData meta; +} PyArray_DatetimeDTypeMetaData; + +/* + * This structure contains an exploded view of a date-time value. + * NaT is represented by year == NPY_DATETIME_NAT. + */ +typedef struct { + npy_int64 year; + npy_int32 month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +/* This is not used internally. 
*/ +typedef struct { + npy_int64 day; + npy_int32 sec, us, ps, as; +} npy_timedeltastruct; + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 + +/* + * Set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 + +/* + * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a + * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with + * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS + * at the same time if they have either zero or one element. + * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional + * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements + * and the array is contiguous if ndarray.squeeze() is contiguous. + * I.e. dimensions for which `ndarray.shape[dimension] == 1` are + * ignored. + */ + +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + * + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_OWNDATA 0x0004 + +/* + * An array never has the next four set; they're only used as parameter + * flags to the the various FromAny functions + * + * This flag may be requested in constructor functions. + */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_ARRAY_FORCECAST 0x0010 + +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. + * + * This flag may be requested in constructor functions. 
+ */ +#define NPY_ARRAY_ENSURECOPY 0x0020 + +/* + * Make sure the returned array is a base-class ndarray + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSUREARRAY 0x0040 + +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 + +/* + * Array data is aligned on the appropiate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_ALIGNED 0x0100 + +/* + * Array data has the native endianness + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_NOTSWAPPED 0x0200 + +/* + * Array data is writeable + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEABLE 0x0400 + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when this array is deallocated + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 + +/* + * NOTE: there are also internal flags defined in multiarray/arrayobject.h, + * which start at bit 31 and work down. 
+ */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ + ((p).real < (q).real))) +#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ + ((p).real > (q).real))) +#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ + ((p).real <= (q).real))) +#define PyArray_CGE(p,q) ((((p).real==(q).real) ? 
((p).imag >= (q).imag) : \ + ((p).real >= (q).real))) +#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) +#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) /* * C API: consists of Macros and functions. The MACROS are defined @@ -120,4 +937,850 @@ #define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) #define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ + { _save = PyEval_SaveThread();} } while (0); + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + +/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* 
Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 + +/*** Per-operand flags that may be passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define 
NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp 
limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define _PyArray_ITER_NEXT3(it) do { \ + if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ + (it)->coordinates[2]++; \ + (it)->dataptr += (it)->strides[2]; \ + } \ + else { \ + (it)->coordinates[2] = 0; \ + (it)->dataptr -= (it)->backstrides[2]; \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] \ + (it)->backstrides[1]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + 
_PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) + +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) + +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp) (ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + * Any object passed to PyArray_Broadcast must be binary compatible + * with this structure. 
+ */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + +/* Store the information needed for fancy-indexing over an array */ + +typedef struct { + PyObject_HEAD + /* + * Multi-iterator portion --- needs to be present in this + * order to work with PyArray_Broadcast + */ + + int numiter; /* number of index-array + iterators */ + npy_intp size; /* size of 
broadcasted + result */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object + iterators */ + PyArrayIterObject *ait; /* flat Iterator for + underlying array */ + + /* flat iterator for subspace (when numiter < nd) */ + PyArrayIterObject *subspace; + + /* + * if subspace iteration, then this is the array of axes in + * the underlying array represented by the index objects + */ + int iteraxes[NPY_MAXDIMS]; + /* + * if subspace iteration, the these are the coordinates to the + * start of the subspace. + */ + npy_intp bscoord[NPY_MAXDIMS]; + + PyObject *indexobj; /* creating obj */ + /* + * consec is first used to indicate wether fancy indices are + * consecutive and then denotes at which axis they are inserted + */ + int consec; + char *dataptr; + +} PyArrayMapIterObject; + +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp dimensions[NPY_MAXDIMS]; + + /* + * Neighborhood points coordinates are 
computed relatively to the + * point pointed by _internal_iter + */ + PyArrayIterObject* _internal_iter; + /* + * To keep a reference to the representation of the constant value + * for constant padding + */ + char* constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static NPY_INLINE int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +#if 0 +static NPY_INLINE int +PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); +#endif + +/* + * Include inline implementations - functions defined there are not + * considered public API + */ +#define _NPY_INCLUDE_NEIGHBORHOOD_IMP +//#include "_neighborhood_iterator_imp.h" +#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP + +/* The default array type */ +#define NPY_DEFAULT_TYPE NPY_DOUBLE + +/* + * All sorts of useful ways to look into a PyArrayObject. It is recommended + * to use PyArrayObject * objects instead of always casting from PyObject *, + * for improved type checking. + * + * In many cases here the macro versions of the accessors are deprecated, + * but can't be immediately changed to inline functions because the + * preexisting macros accept PyObject * and do automatic casts. Inline + * functions accepting PyArrayObject * provides for some compile-time + * checking of correctness when working with these objects in C. + */ + +#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ + (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\ + NPY_ARRAY_F_CONTIGUOUS : 0)) + +#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) +/* + * Changing access macros into functions, to allow for future hiding + * of the internal memory layout. This later hiding will allow the 2.x series + * to change the internal representation of arrays without affecting + * ABI compatibility. + */ + +static NPY_INLINE int +PyArray_NDIM(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->nd; +} + +static NPY_INLINE void * +PyArray_DATA(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE char * +PyArray_BYTES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE npy_intp * +PyArray_DIMS(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +static NPY_INLINE npy_intp * +PyArray_STRIDES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->strides; +} + +static NPY_INLINE npy_intp +PyArray_DIM(const PyArrayObject *arr, int idim) +{ + return ((PyArrayObject_fields *)arr)->dimensions[idim]; +} + +static NPY_INLINE npy_intp +PyArray_STRIDE(const PyArrayObject *arr, int istride) +{ + return ((PyArrayObject_fields *)arr)->strides[istride]; +} + +static NPY_INLINE PyObject * +PyArray_BASE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->base; +} + +static NPY_INLINE PyArray_Descr * +PyArray_DESCR(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE int +PyArray_FLAGS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->flags; +} + +static NPY_INLINE npy_intp +PyArray_ITEMSIZE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->elsize; +} + +static NPY_INLINE int +PyArray_TYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->type_num; +} + +static NPY_INLINE int +PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) +{ + return (PyArray_FLAGS(arr) & 
flags) == flags; +} + +static NPY_INLINE PyObject * +PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) +{ + return ((PyArrayObject_fields *)arr)->descr->f->getitem( + (void *)itemptr, (PyArrayObject *)arr); +} + +static NPY_INLINE int +PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) +{ + return ((PyArrayObject_fields *)arr)->descr->f->setitem( + v, itemptr, arr); +} + +#else + +/* These macros are deprecated as of NumPy 1.7. */ +#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) +#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) +#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) +#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) +#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) +#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) +#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) +#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) +#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) +#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) +#define PyArray_CHKFLAGS(m, FLAGS) \ + ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) +#define PyArray_ITEMSIZE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->elsize) +#define PyArray_TYPE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->type_num) +#define PyArray_GETITEM(obj,itemptr) \ + PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ + (PyArrayObject *)(obj)) + +#define PyArray_SETITEM(obj,itemptr,v) \ + PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ + (char *)(itemptr), \ + (PyArrayObject *)(obj)) +#endif + +static NPY_INLINE PyArray_Descr * +PyArray_DTYPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE npy_intp * +PyArray_SHAPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +/* + * Enables the specified array flags. 
Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags |= flags; +} + +/* + * Clears the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags &= ~flags; +} + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ + ((type) == NPY_DOUBLE) || \ + ((type) == NPY_CDOUBLE) || \ + ((type) == NPY_BOOL) || \ + ((type) == NPY_OBJECT )) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define 
PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj)) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) +#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) +#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) +#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) +#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) +#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) +#define 
PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) +#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) +#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) +#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) +#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) +#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) + + /* + * FIXME: This should check for a flag on the data-type that + * states whether or not it is variable length. Because the + * ISFLEXIBLE check is hard-coded to the built-in data-types. + */ +#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) + +#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) + + +#define NPY_LITTLE '<' +#define NPY_BIG '>' +#define NPY_NATIVE '=' +#define NPY_SWAP 's' +#define NPY_IGNORE '|' + +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN +#define NPY_NATBYTE NPY_BIG +#define NPY_OPPBYTE NPY_LITTLE +#else +#define NPY_NATBYTE NPY_LITTLE +#define NPY_OPPBYTE NPY_BIG +#endif + +#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) +#define PyArray_IsNativeByteOrder PyArray_ISNBO +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + + +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) +#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) 
+ +/************************************************************ + * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. + ************************************************************/ + +typedef struct { + npy_intp perm, stride; +} npy_stride_sort_item; + +/************************************************************ + * This is the form of the struct that's returned pointed by the + * PyCObject attribute of an array __array_struct__. See + * http://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full + * documentation. + ************************************************************/ +typedef struct { + int two; /* + * contains the integer 2 as a sanity + * check + */ + + int nd; /* number of dimensions */ + + char typekind; /* + * kind in array --- character code of + * typestr + */ + + int itemsize; /* size of each element */ + + int flags; /* + * how should be data interpreted. Valid + * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), + * ALIGNED (0x100), NOTSWAPPED (0x200), and + * WRITEABLE (0x400). ARR_HAS_DESCR (0x800) + * states that arrdescr field is present in + * structure + */ + + npy_intp *shape; /* + * A length-nd array of shape + * information + */ + + npy_intp *strides; /* A length-nd array of stride information */ + + void *data; /* A pointer to the first element of the array */ + + PyObject *descr; /* + * A list of fields or NULL (ignored if flags + * does not have ARR_HAS_DESCR flag set) + */ +} PyArrayInterface; + +/* + * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions. + * See the documentation for PyDataMem_SetEventHook. + */ +typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, + void *user_data); + +/* + * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files + * npy_*_*_deprecated_api.h are only included from here and nowhere else. + */ +#ifdef NPY_DEPRECATED_INCLUDES +#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." 
+#endif +#define NPY_DEPRECATED_INCLUDES +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +#include "npy_1_7_deprecated_api.h" +#endif +/* + * There is no file npy_1_8_deprecated_api.h since there are no additional + * deprecated API features in NumPy 1.8. + * + * Note to maintainers: insert code like the following in future NumPy + * versions. From pypy.commits at gmail.com Mon Apr 25 16:18:54 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Apr 2016 13:18:54 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: update version to 5.1.1 Message-ID: <571e7bae.c4efc20a.9e9e3.5b83@mx.google.com> Author: mattip Branch: release-5.x Changeset: r83883:3279ee014769 Date: 2016-04-25 08:22 +0300 http://bitbucket.org/pypy/pypy/changeset/3279ee014769/ Log: update version to 5.1.1 diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.1.0" -#define PYPY_VERSION_NUM 0x05010000 +#define PYPY_VERSION "5.1.1" +#define PYPY_VERSION_NUM 0x05010100 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. 
staying alive as long as the internal PyPy object diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (5, 1, 0, "final", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 1, 1, "final", 0) #XXX # sync patchlevel.h import pypy From pypy.commits at gmail.com Mon Apr 25 16:18:55 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Apr 2016 13:18:55 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: fix test (revert part of a03329def3ec) Message-ID: <571e7baf.22d8c20a.e2be4.6b38@mx.google.com> Author: mattip Branch: release-5.x Changeset: r83884:814b194d959c Date: 2016-04-25 08:45 +0300 http://bitbucket.org/pypy/pypy/changeset/814b194d959c/ Log: fix test (revert part of a03329def3ec) diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -102,7 +102,7 @@ def test_copy_header_files(tmpdir): - api.copy_header_files(tmpdir) + api.copy_header_files(tmpdir, True) def check(name): f = tmpdir.join(name) assert f.check(file=True) From pypy.commits at gmail.com Mon Apr 25 17:22:02 2016 From: pypy.commits at gmail.com (amauryfa) Date: Mon, 25 Apr 2016 14:22:02 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Hack until the objspace can start and pass some tests. Message-ID: <571e8a7a.6614c20a.a79c4.6f9e@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.5 Changeset: r83885:55f9c52f86f4 Date: 2016-04-25 23:20 +0200 http://bitbucket.org/pypy/pypy/changeset/55f9c52f86f4/ Log: Hack until the objspace can start and pass some tests. 
diff --git a/lib-python/3/importlib/_bootstrap.py b/lib-python/3/importlib/_bootstrap.py --- a/lib-python/3/importlib/_bootstrap.py +++ b/lib-python/3/importlib/_bootstrap.py @@ -1137,6 +1137,6 @@ sys.meta_path.append(FrozenImporter) global _bootstrap_external - import _frozen_importlib_external + _frozen_importlib_external = sys.modules['_frozen_importlib_external'] _bootstrap_external = _frozen_importlib_external _frozen_importlib_external._install(sys.modules[__name__]) diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -557,7 +557,6 @@ ops.LIST_APPEND: -1, ops.SET_ADD: -1, ops.MAP_ADD: -2, - ops.STORE_MAP: -2, ops.BINARY_POWER: -1, ops.BINARY_MULTIPLY: -1, @@ -597,9 +596,9 @@ ops.PRINT_EXPR: -1, - ops.WITH_CLEANUP: -1, + ops.WITH_CLEANUP_START: -1, + ops.WITH_CLEANUP_FINISH: -1, # XXX Sometimes more ops.LOAD_BUILD_CLASS: 1, - ops.STORE_LOCALS: -1, ops.POP_BLOCK: 0, ops.POP_EXCEPT: -1, ops.END_FINALLY: -4, # assume always 4: we pretend that SETUP_FINALLY @@ -612,7 +611,6 @@ ops.RETURN_VALUE: -1, ops.YIELD_VALUE: 0, ops.YIELD_FROM: -1, - ops.BUILD_MAP: 1, ops.COMPARE_OP: -1, ops.LOOKUP_METHOD: 1, @@ -672,6 +670,12 @@ def _compute_BUILD_SET(arg): return 1 - arg +def _compute_BUILD_MAP(arg): + return 1 - 2 * arg + +def _compute_BUILD_MAP_UNPACK(arg): + return 1 - arg + def _compute_MAKE_CLOSURE(arg): return -2 - _num_args(arg) - ((arg >> 16) & 0xFFFF) diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -274,8 +274,7 @@ return False def _get_code_flags(self): - # Default for everything but module scopes. 
- return consts.CO_NEWLOCALS + return 0 def _handle_body(self, body): """Compile a list of statements, handling doc strings if needed.""" @@ -865,7 +864,8 @@ self.load_const(self.space.w_None) self.use_next_block(cleanup) self.push_frame_block(F_BLOCK_FINALLY_END, cleanup) - self.emit_op(ops.WITH_CLEANUP) + self.emit_op(ops.WITH_CLEANUP_START) + self.emit_op(ops.WITH_CLEANUP_FINISH) self.emit_op(ops.END_FINALLY) self.pop_frame_block(F_BLOCK_FINALLY_END, cleanup) @@ -1062,12 +1062,26 @@ def visit_Dict(self, d): self.update_position(d.lineno) - self.emit_op_arg(ops.BUILD_MAP, 0) + containers = 0 + elements = 0 if d.values: for i in range(len(d.values)): + if elements == 0xFFFF: + self.emit_op_arg(ops.BUILD_MAP, elements) + containers += 1 + elements = 0 d.values[i].walkabout(self) d.keys[i].walkabout(self) - self.emit_op(ops.STORE_MAP) + elements += 1 + if elements or containers == 0: + self.emit_op_arg(ops.BUILD_MAP, elements) + containers += 1 + # If there is more than one dict, they need to be merged into + # a new dict. + while containers > 1: + oparg = max(containers, 255) + self.emit_op_arg(ops.BUILD_MAP_UNPACK, oparg) + containers -= (oparg - 1) def visit_Set(self, s): self.update_position(s.lineno) @@ -1289,7 +1303,7 @@ def _get_code_flags(self): scope = self.scope assert isinstance(scope, symtable.FunctionScope) - flags = 0 + flags = consts.CO_NEWLOCALS if scope.optimized: flags |= consts.CO_OPTIMIZED if scope.nested: @@ -1372,10 +1386,6 @@ self.ensure_docstring_constant(cls.body) self.lineno = self.first_lineno self.argcount = 1 - # load the first argument (__locals__) ... - self.emit_op_arg(ops.LOAD_FAST, 0) - # ...and store it into f_locals. - self.emit_op(ops.STORE_LOCALS) # load (global) __name__ ... self.name_op("__name__", ast.Load) # ... 
and store it as __module__ diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -380,10 +380,6 @@ self.STORE_FAST(oparg, next_instr) elif opcode == opcodedesc.STORE_GLOBAL.index: self.STORE_GLOBAL(oparg, next_instr) - elif opcode == opcodedesc.STORE_LOCALS.index: - self.STORE_LOCALS(oparg, next_instr) - elif opcode == opcodedesc.STORE_MAP.index: - self.STORE_MAP(oparg, next_instr) elif opcode == opcodedesc.STORE_NAME.index: self.STORE_NAME(oparg, next_instr) elif opcode == opcodedesc.STORE_SUBSCR.index: @@ -400,8 +396,10 @@ self.UNPACK_EX(oparg, next_instr) elif opcode == opcodedesc.UNPACK_SEQUENCE.index: self.UNPACK_SEQUENCE(oparg, next_instr) - elif opcode == opcodedesc.WITH_CLEANUP.index: - self.WITH_CLEANUP(oparg, next_instr) + elif opcode == opcodedesc.WITH_CLEANUP_START.index: + self.WITH_CLEANUP_START(oparg, next_instr) + elif opcode == opcodedesc.WITH_CLEANUP_FINISH.index: + self.WITH_CLEANUP_FINISH(oparg, next_instr) elif opcode == opcodedesc.YIELD_VALUE.index: self.YIELD_VALUE(oparg, next_instr) elif opcode == opcodedesc.YIELD_FROM.index: @@ -692,9 +690,6 @@ def LOAD_LOCALS(self, oparg, next_instr): self.pushvalue(self.getorcreatedebug().w_locals) - def STORE_LOCALS(self, oparg, next_instr): - self.getorcreatedebug().w_locals = self.popvalue() - def exec_(self, w_prog, w_globals, w_locals): """The builtins.exec function.""" space = self.space @@ -1138,7 +1133,7 @@ self.lastblock = block self.pushvalue(w_result) - def WITH_CLEANUP(self, oparg, next_instr): + def WITH_CLEANUP_START(self, oparg, next_instr): # see comment in END_FINALLY for stack state w_unroller = self.popvalue() w_exitfunc = self.popvalue() @@ -1149,21 +1144,28 @@ old_last_exception = self.last_exception self.last_exception = operr w_traceback = self.space.wrap(operr.get_traceback()) - w_suppress = self.call_contextmanager_exit_function( + w_res = self.call_contextmanager_exit_function( w_exitfunc, 
operr.w_type, operr.get_w_value(self.space), w_traceback) self.last_exception = old_last_exception + else: + w_res = self.call_contextmanager_exit_function( + w_exitfunc, + self.space.w_None, + self.space.w_None, + self.space.w_None) + self.pushvalue(w_res) + self.pushvalue(w_unroller) + + def WITH_CLEANUP_FINISH(self, oparg, next_instr): + w_unroller = self.popvalue() + w_suppress = self.popvalue() + if isinstance(w_unroller, SApplicationException): if self.space.is_true(w_suppress): # __exit__() returned True -> Swallow the exception. self.settopvalue(self.space.w_None) - else: - self.call_contextmanager_exit_function( - w_exitfunc, - self.space.w_None, - self.space.w_None, - self.space.w_None) @jit.unroll_safe def call_function(self, oparg, w_star=None, w_starstar=None): @@ -1309,6 +1311,10 @@ def BUILD_MAP(self, itemcount, next_instr): w_dict = self.space.newdict() + for i in range(itemcount): + w_key = self.popvalue() + w_value = self.popvalue() + self.space.setitem(w_dict, w_key, w_value) self.pushvalue(w_dict) @jit.unroll_safe @@ -1319,12 +1325,6 @@ self.space.call_method(w_set, 'add', w_item) self.pushvalue(w_set) - def STORE_MAP(self, oparg, next_instr): - w_key = self.popvalue() - w_value = self.popvalue() - w_dict = self.peekvalue() - self.space.setitem(w_dict, w_key, w_value) - ### ____________________________________________________________ ### diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -3,11 +3,12 @@ """ from pypy.interpreter.pycode import PyCode -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.astcompiler import consts, ast from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.argument import Arguments from pypy.interpreter.nestedscope import Cell +from pypy.interpreter.function import Function 
@unwrap_spec(filename='fsencode', mode=str, flags=int, dont_inherit=int, optimize=int) @@ -94,6 +95,8 @@ frame.exec_(w_prog, w_globals, w_locals) def build_class(space, w_func, w_name, __args__): + if not isinstance(w_func, Function): + raise oefmt(space.w_TypeError, "__build_class__: func must be a function") bases_w, kwds_w = __args__.unpack() w_bases = space.newtuple(bases_w) w_meta = kwds_w.pop('metaclass', None) @@ -124,7 +127,7 @@ keywords=keywords, keywords_w=kwds_w.values()) w_namespace = space.call_args(w_prep, args) - w_cell = space.call_function(w_func, w_namespace) + w_cell = w_func.getcode().exec_code(space, w_func.w_func_globals, w_namespace) keywords = kwds_w.keys() args = Arguments(space, args_w=[w_name, w_bases, w_namespace], diff --git a/pypy/module/_frozen_importlib/__init__.py b/pypy/module/_frozen_importlib/__init__.py --- a/pypy/module/_frozen_importlib/__init__.py +++ b/pypy/module/_frozen_importlib/__init__.py @@ -13,22 +13,33 @@ appleveldefs = { } + @staticmethod + def _compile_bootstrap_module(space, name, w_name, w_dict): + """NOT_RPYTHON""" + ec = space.getexecutioncontext() + with open(os.path.join(lib_python, 'importlib', name + '.py')) as fp: + source = fp.read() + pathname = "" % name + code_w = ec.compiler.compile(source, pathname, 'exec', 0) + space.setitem(w_dict, space.wrap('__name__'), w_name) + space.setitem(w_dict, space.wrap('__builtins__'), + space.wrap(space.builtin)) + code_w.exec_code(space, w_dict, w_dict) + def install(self): """NOT_RPYTHON""" super(Module, self).install() space = self.space + # "import importlib/_boostrap_external.py" + w_mod = Module(space, space.wrap("_frozen_importlib_external")) + self._compile_bootstrap_module( + space, '_bootstrap_external', w_mod.w_name, w_mod.w_dict) + space.sys.setmodule(w_mod) # "from importlib/_boostrap.py import *" # It's not a plain "import importlib._boostrap", because we # don't want to freeze importlib.__init__. 
- ec = space.getexecutioncontext() - with open(os.path.join(lib_python, 'importlib', '_bootstrap.py')) as fp: - source = fp.read() - pathname = "" - code_w = ec.compiler.compile(source, pathname, 'exec', 0) - space.setitem(self.w_dict, space.wrap('__name__'), self.w_name) - space.setitem(self.w_dict, space.wrap('__builtins__'), - space.wrap(space.builtin)) - code_w.exec_code(space, self.w_dict, self.w_dict) + self._compile_bootstrap_module( + space, '_bootstrap', self.w_name, self.w_dict) self.w_import = space.wrap(interp_import.import_with_frames_removed) diff --git a/pypy/module/imp/__init__.py b/pypy/module/imp/__init__.py --- a/pypy/module/imp/__init__.py +++ b/pypy/module/imp/__init__.py @@ -13,10 +13,12 @@ 'get_magic': 'interp_imp.get_magic', 'get_tag': 'interp_imp.get_tag', 'load_dynamic': 'interp_imp.load_dynamic', - 'init_builtin': 'interp_imp.init_builtin', + 'create_builtin': 'interp_imp.create_builtin', 'init_frozen': 'interp_imp.init_frozen', 'is_builtin': 'interp_imp.is_builtin', 'is_frozen': 'interp_imp.is_frozen', + 'exec_builtin': 'interp_imp.exec_builtin', + 'exec_dynamic': 'interp_imp.exec_builtin', 'get_frozen_object': 'interp_imp.get_frozen_object', 'is_frozen_package': 'interp_imp.is_frozen_package', diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -64,10 +64,9 @@ return importing.check_sys_modules(space, w_modulename) -def init_builtin(space, w_name): +def create_builtin(space, w_spec): + w_name = space.getattr(w_spec, space.wrap("name")) name = space.str0_w(w_name) - if name not in space.builtin_modules: - return # force_init is needed to make reload actually reload instead of just # using the already-present module in sys.modules. 
@@ -76,6 +75,9 @@ reuse = space.finditem(space.sys.get('modules'), w_name) is not None return space.getbuiltinmodule(name, force_init=True, reuse=reuse) +def exec_builtin(space, w_mod): + return # Until we really support ModuleDef + def init_frozen(space, w_name): return None diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -193,7 +193,10 @@ def startup(self, space): from pypy.module.posix import interp_posix + from pypy.module.imp import importing interp_posix.get(space).startup(space) + # Import structseq before the full importlib is ready + importing.importhook(space, '_structseq') for constant in dir(os): value = getattr(os, constant) From pypy.commits at gmail.com Mon Apr 25 17:44:22 2016 From: pypy.commits at gmail.com (amauryfa) Date: Mon, 25 Apr 2016 14:44:22 -0700 (PDT) Subject: [pypy-commit] pypy py3.5: Fix for class definition inside a function. Message-ID: <571e8fb6.85661c0a.49c9.ffffe709@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.5 Changeset: r83886:1ec6fa2191c0 Date: 2016-04-25 23:42 +0200 http://bitbucket.org/pypy/pypy/changeset/1ec6fa2191c0/ Log: Fix for class definition inside a function. 
diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -127,7 +127,10 @@ keywords=keywords, keywords_w=kwds_w.values()) w_namespace = space.call_args(w_prep, args) - w_cell = w_func.getcode().exec_code(space, w_func.w_func_globals, w_namespace) + code = w_func.getcode() + frame = space.createframe(code, w_func.w_func_globals, w_func) + frame.setdictscope(w_namespace) + w_cell = frame.run() keywords = kwds_w.keys() args = Arguments(space, args_w=[w_name, w_bases, w_namespace], From pypy.commits at gmail.com Mon Apr 25 20:11:43 2016 From: pypy.commits at gmail.com (mjacob) Date: Mon, 25 Apr 2016 17:11:43 -0700 (PDT) Subject: [pypy-commit] pypy default: Add more details on how to update the stdlib files while properly tracking renames. Message-ID: <571eb23f.85661c0a.873b8.1033@mx.google.com> Author: Manuel Jacob Branch: Changeset: r83887:cb3d30fa02ee Date: 2016-04-26 02:10 +0200 http://bitbucket.org/pypy/pypy/changeset/cb3d30fa02ee/ Log: Add more details on how to update the stdlib files while properly tracking renames. (+ small tweaks) diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -5,15 +5,23 @@ overly detailed -1. check out the branch vendor/stdlib +0. make sure your working dir is clean +1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k) + or create branch vendor/stdlib-3-* 2. upgrade the files there + 2a. remove lib-python/2.7/ or lib-python/3/ + 2b. copy the files from the cpython repo + 2c. hg add lib-python/2.7/ or lib-python/3/ + 2d. hg remove --after + 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'` + 2f. fix copies / renames manually by running `hg copy --after ` for each copied file 3. 
update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit -5. update to default/py3k +5. update to default / py3k 6. create a integration branch for the new stdlib (just hg branch stdlib-$version) -7. merge vendor/stdlib +7. merge vendor/stdlib or vendor/stdlib-3-* 8. commit 10. fix issues 11. commit --close-branch -12. merge to default +12. merge to default / py3k From pypy.commits at gmail.com Mon Apr 25 20:35:58 2016 From: pypy.commits at gmail.com (pjenvey) Date: Mon, 25 Apr 2016 17:35:58 -0700 (PDT) Subject: [pypy-commit] pypy default: simplify sys_exc_info to return None for a cleared exception and reuse some of Message-ID: <571eb7ee.d3161c0a.eb71.fffffed8@mx.google.com> Author: Philip Jenvey Branch: Changeset: r83888:c24bc2ff9f5d Date: 2016-04-25 17:34 -0700 http://bitbucket.org/pypy/pypy/changeset/c24bc2ff9f5d/ Log: simplify sys_exc_info to return None for a cleared exception and reuse some of it in RAISE_VARARGS diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -214,6 +214,7 @@ self._trace(frame, 'exception', None, operationerr) #operationerr.print_detailed_traceback(self.space) + @jit.dont_look_inside @specialize.arg(1) def sys_exc_info(self, for_hidden=False): """Implements sys.exc_info(). @@ -225,15 +226,7 @@ # NOTE: the result is not the wrapped sys.exc_info() !!! 
""" - frame = self.gettopframe() - while frame: - if frame.last_exception is not None: - if ((for_hidden or not frame.hide()) or - frame.last_exception is - get_cleared_operation_error(self.space)): - return frame.last_exception - frame = frame.f_backref() - return None + return self.gettopframe()._exc_info_unroll(self.space, for_hidden) def set_sys_exc_info(self, operror): frame = self.gettopframe_nohidden() diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized, check_nonneg from rpython.rlib.jit import hint -from rpython.rlib.objectmodel import we_are_translated, instantiate +from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint from rpython.tool.pairtype import extendabletype @@ -12,7 +12,8 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.astcompiler import consts from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import ( + OperationError, get_cleared_operation_error, oefmt) from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.nestedscope import Cell from pypy.tool import stdlib_opcode @@ -870,6 +871,22 @@ return space.wrap(self.builtin is not space.builtin) return space.w_False + @jit.unroll_safe + @specialize.arg(2) + def _exc_info_unroll(self, space, for_hidden=False): + """Return the most recent OperationError being handled in the + call stack + """ + frame = self + while frame: + last = frame.last_exception + if last is not None: + if last is get_cleared_operation_error(self.space): + break + if for_hidden or not frame.hide(): + return last + frame = frame.f_backref() + return None # ____________________________________________________________ diff --git 
a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -739,25 +739,16 @@ unroller = SContinueLoop(startofloop) return self.unrollstack_and_jump(unroller) - @jit.unroll_safe def RAISE_VARARGS(self, nbargs, next_instr): space = self.space if nbargs == 0: - frame = self - while frame: - if frame.last_exception is not None: - operror = frame.last_exception - break - frame = frame.f_backref() - else: - raise OperationError(space.w_TypeError, - space.wrap("raise: no active exception to re-raise")) - if operror.w_type is space.w_None: - raise OperationError(space.w_TypeError, - space.wrap("raise: the exception to re-raise was cleared")) + last_operr = self._exc_info_unroll(space) + if last_operr is None: + raise oefmt(space.w_TypeError, + "No active exception to reraise") # re-raise, no new traceback obj will be attached - self.last_exception = operror - raise RaiseWithExplicitTraceback(operror) + self.last_exception = last_operr + raise RaiseWithExplicitTraceback(last_operr) w_value = w_traceback = space.w_None if nbargs >= 3: From pypy.commits at gmail.com Mon Apr 25 20:57:09 2016 From: pypy.commits at gmail.com (pjenvey) Date: Mon, 25 Apr 2016 17:57:09 -0700 (PDT) Subject: [pypy-commit] pypy py3k: fix translation on osx, specialize utime when called w/ None Message-ID: <571ebce5.a2f2c20a.dfd0b.ffff9c18@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83889:597f649edbb9 Date: 2016-04-25 17:55 -0700 http://bitbucket.org/pypy/pypy/changeset/597f649edbb9/ Log: fix translation on osx, specialize utime when called w/ None diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -13,6 +13,7 @@ from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib.unroll import unrolling_iterable +from rpython.tool.sourcetools 
import func_with_new_name from pypy.interpreter.gateway import ( unwrap_spec, WrappedDefault, Unwrapper, kwonly) @@ -43,6 +44,9 @@ raise OperationError(space.w_OverflowError, space.wrap("integer out of range")) +# specialize utime when called w/ None for use w/ call_rposix +utime_now = func_with_new_name(rposix.utime, 'utime_now') + class FileEncoder(object): is_unicode = True @@ -1385,11 +1389,11 @@ not space.is_w(w_ns, space.w_None)): raise oefmt(space.w_ValueError, "utime: you may specify either 'times' or 'ns' but not both") - utime_now = False + now = False if space.is_w(w_times, space.w_None) and space.is_w(w_ns, space.w_None): atime_s = mtime_s = 0 atime_ns = mtime_ns = 0 - utime_now = True + now = True elif not space.is_w(w_times, space.w_None): times_w = space.fixedview(w_times) if len(times_w) != 2: @@ -1412,7 +1416,7 @@ if not follow_symlinks: raise oefmt(space.w_ValueError, "utime: cannot use fd and follow_symlinks together") - if utime_now: + if now: atime_ns = mtime_ns = rposix.UTIME_NOW try: rposix.futimens(path.as_fd, atime_s, atime_ns, mtime_s, mtime_ns) @@ -1432,7 +1436,7 @@ raise oefmt(space.w_NotImplementedError, "utime: unsupported value for 'path'") try: - if utime_now: + if now: rposix.utimensat( path_b, 0, rposix.UTIME_NOW, 0, rposix.UTIME_NOW, dir_fd=dir_fd, follow_symlinks=follow_symlinks) @@ -1451,9 +1455,9 @@ if not space.is_w(w_ns, space.w_None): raise oefmt(space.w_NotImplementedError, "utime: 'ns' unsupported on this platform on PyPy") - if utime_now: + if now: try: - call_rposix(rposix.utime, path, None) + call_rposix(utime_now, path, None) except OSError as e: # see comment above raise wrap_oserror(space, e) From pypy.commits at gmail.com Tue Apr 26 02:13:59 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Apr 2016 23:13:59 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: fixed translation issues Message-ID: <571f0727.d81a1c0a.16653.621c@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: 
r83890:13151099cbf6 Date: 2016-04-26 08:13 +0200 http://bitbucket.org/pypy/pypy/changeset/13151099cbf6/ Log: fixed translation issues diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -5,7 +5,7 @@ from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.jit import JitDriver, hint, we_are_jitted, dont_look_inside -from rpython.rlib import jit, jit_hooks +from rpython.rlib import jit, jit_hooks, jitlog as jl from rpython.rlib.jit import current_trace_length, unroll_parameters,\ JitHookInterface from rpython.rtyper.annlowlevel import cast_instance_to_gcref @@ -45,9 +45,10 @@ jl.MP_SCOPE, jl.MP_INDEX, jl.MP_OPCODE) def get_location(next_instr, is_being_profiled, bytecode): from pypy.tool.stdlib_opcode import opcode_method_names - opname = opcode_method_names[ord(bytecode.co_code[next_instr])] - if not opname: - opname = "" + bcindex = ord(bytecode.co_code[next_instr]) + opname = "" + if 0 <= bcindex < len(opcode_method_names): + opname = opcode_method_names[bcindex] name = bytecode.co_name if not name: name = "" diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -22,6 +22,8 @@ , (rop.UNICODESETITEM, 0, -1) ] +UNROLLED_MODIFY_COMPLEX_OBJ = unrolling_iterable(MODIFY_COMPLEX_OBJ) + LOAD_COMPLEX_OBJ = [ (rop.GETARRAYITEM_GC_I, 0, 1) , (rop.GETARRAYITEM_GC_F, 0, 1) , (rop.GETARRAYITEM_GC_R, 0, 1) @@ -40,6 +42,8 @@ , (rop.GETFIELD_RAW_R, 0, -1) ] +UNROLLED_LOAD_COMPLEX_OBJ = unrolling_iterable(LOAD_COMPLEX_OBJ) + class Path(object): def __init__(self,path): self.path = path @@ -202,7 +206,7 @@ args = [] op = self.op if self.modifies_complex_object(): - for opnum, i, j in unrolling_iterable(MODIFY_COMPLEX_OBJ): + for opnum, i, j in UNROLLED_MODIFY_COMPLEX_OBJ: 
#unrolling_iterable(MODIFY_COMPLEX_OBJ): if op.getopnum() == opnum: op_args = op.getarglist() if j == -1: @@ -723,7 +727,7 @@ if node.loads_from_complex_object(): # If this complex object load operation loads an index that has been # modified, the last modification should be used to put a def-use edge. - for opnum, i, j in unrolling_iterable(LOAD_COMPLEX_OBJ): + for opnum, i, j in UNROLLED_LOAD_COMPLEX_OBJ: if opnum == op.getopnum(): cobj = op.getarg(i) if j != -1: diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -581,8 +581,6 @@ annhelper, jd.jitdriver.should_unroll_one_iteration, annmodel.s_Bool) # - s_Str = annmodel.SomeString(no_nul=True) - s_Int = annmodel.SomeInteger() items = [] types = () pos = () @@ -601,9 +599,9 @@ # for _,type in types: if type == 's': - items.append(s_Str) + items.append(annmodel.SomeString()) elif type == 'i': - items.append(s_Int) + items.append(annmodel.SomeInteger()) else: raise NotImplementedError s_Tuple = annmodel.SomeTuple(items) diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py --- a/rpython/rlib/jitlog.py +++ b/rpython/rlib/jitlog.py @@ -176,17 +176,20 @@ ('COMMON_PREFIX',), ] -start = 0x10 +start = 0x11 for mark, in marks: globals()['MARK_' + mark] = start start += 1 if __name__ == "__main__": print("# generated constants from rpython/rlib/jitlog.py") - for mark in marks: - print '%s = %d' % ('MARK_' + mark, globals()['MARK_' + mark]) + print 'MARK_JITLOG_START = chr(%s)' % hex(0x10) + for mark, in marks: + print '%s = chr(%s)' % ('MARK_' + mark, hex(globals()['MARK_' + mark])) + print 'MARK_JITLOG_END = chr(%s)' % hex(start) del marks +del start IS_32_BIT = sys.maxint == 2**31-1 @@ -331,7 +334,7 @@ def write(self, args, ops, ops_offset={}): log = self.logger - log._write_marked(self.tag, encode_le_addr(self.logger.trace_id)) + log._write_marked(self.tag, encode_le_64bit(self.logger.trace_id)) # 
input args str_args = [self.var_to_str(arg) for arg in args] From pypy.commits at gmail.com Tue Apr 26 04:44:16 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 01:44:16 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: A branch to use a bitstring to replace the lists of read or written Message-ID: <571f2a60.8d1f1c0a.2b11a.ffff9d8d@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83891:c7aefe4f5dce Date: 2016-04-25 18:27 +0200 http://bitbucket.org/pypy/pypy/changeset/c7aefe4f5dce/ Log: A branch to use a bitstring to replace the lists of read or written FieldDescrs that we attach to EffectInfo From pypy.commits at gmail.com Tue Apr 26 04:44:18 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 01:44:18 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: The basic implementation Message-ID: <571f2a62.6a70c20a.745cb.ffffa8b7@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83892:227dd39eb692 Date: 2016-04-25 18:38 +0200 http://bitbucket.org/pypy/pypy/changeset/227dd39eb692/ Log: The basic implementation diff --git a/rpython/tool/algo/bitstring.py b/rpython/tool/algo/bitstring.py new file mode 100644 --- /dev/null +++ b/rpython/tool/algo/bitstring.py @@ -0,0 +1,20 @@ + + +def make_bitstring(lst): + "NOT_RPYTHON" + if not lst: + return '' + num_bits = max(lst) + 1 + num_bytes = (num_bits + 7) // 8 + entries = [0] * num_bytes + for x in lst: + assert x >= 0 + entries[x >> 3] |= 1 << (x & 7) + return ''.join(map(chr, entries)) + +def bitcheck(bitstring, n): + assert n >= 0 + byte_number = n >> 3 + if byte_number >= len(bitstring): + return False + return (ord(bitstring[byte_number]) & (1 << (n & 7))) != 0 diff --git a/rpython/tool/algo/test/test_bitstring.py b/rpython/tool/algo/test/test_bitstring.py new file mode 100644 --- /dev/null +++ b/rpython/tool/algo/test/test_bitstring.py @@ -0,0 +1,20 @@ +from rpython.tool.algo.bitstring import * +from hypothesis import given, strategies + +def test_make(): + 
assert make_bitstring([]) == '' + assert make_bitstring([0]) == '\x01' + assert make_bitstring([7]) == '\x80' + assert make_bitstring([8]) == '\x00\x01' + assert make_bitstring([2, 4, 20]) == '\x14\x00\x10' + +def test_bitcheck(): + assert bitcheck('\x01', 0) is True + assert bitcheck('\x01', 1) is False + assert bitcheck('\x01', 10) is False + assert [n for n in range(32) if bitcheck('\x14\x00\x10', n)] == [2, 4, 20] + + at given(strategies.lists(strategies.integers(min_value=0, max_value=299))) +def test_random(lst): + bitstring = make_bitstring(lst) + assert set([n for n in range(300) if bitcheck(bitstring, n)]) == set(lst) From pypy.commits at gmail.com Tue Apr 26 04:44:20 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 01:44:20 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: in-progress Message-ID: <571f2a64.10691c0a.10606.ffff8425@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83893:93c32636b538 Date: 2016-04-25 19:02 +0200 http://bitbucket.org/pypy/pypy/changeset/93c32636b538/ Log: in-progress diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -2,6 +2,31 @@ from rpython.rtyper.rclass import OBJECT from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.translator.backendopt.graphanalyze import BoolGraphAnalyzer +from rpython.tool.algo import bitstring + + + +class DescrCounterCache(object): + cache = [] + + def make_descr_set(self, lst): + if lst is None: + return None + integer_list = [] + for descr in lst: + try: + x = descr._ei_index + except AttributeError: + x = descr._ei_index = len(self.cache) + self.cache.append(descr) + integer_list.append(x) + return bitstring.make_bitstring(integer_list) + +def expand_descr_list(cpu, bitstr): + # for debugging + return [cpu._descr_counter_cache.cache[i] + for i in range(bitstring.num_bits(bitstr)) + if bitstring.bitcheck(bitstr, i)] 
class EffectInfo(object): @@ -109,13 +134,21 @@ oopspecindex=OS_NONE, can_invalidate=False, call_release_gil_target=_NO_CALL_RELEASE_GIL_TARGET, - extradescrs=None): - key = (frozenset_or_none(readonly_descrs_fields), - frozenset_or_none(readonly_descrs_arrays), - frozenset_or_none(readonly_descrs_interiorfields), - frozenset_or_none(write_descrs_fields), - frozenset_or_none(write_descrs_arrays), - frozenset_or_none(write_descrs_interiorfields), + extradescrs=None, + descr_counter_cache=DescrCounterCache()): + make = descr_counter_cache.make_descr_set + readonly_descrs_fields = make(readonly_descrs_fields) + readonly_descrs_arrays = make(readonly_descrs_arrays) + readonly_descrs_interiorfields = make(readonly_descrs_interiorfields) + write_descrs_fields = make(write_descrs_fields) + write_descrs_arrays = make(write_descrs_arrays) + write_descrs_interiorfields = make(write_descrs_interiorfields) + key = (readonly_descrs_fields, + readonly_descrs_arrays, + readonly_descrs_interiorfields, + write_descrs_fields, + write_descrs_arrays, + write_descrs_interiorfields, extraeffect, oopspecindex, can_invalidate) @@ -142,19 +175,9 @@ result.readonly_descrs_fields = readonly_descrs_fields result.readonly_descrs_arrays = readonly_descrs_arrays result.readonly_descrs_interiorfields = readonly_descrs_interiorfields - if extraeffect == EffectInfo.EF_LOOPINVARIANT or \ - extraeffect == EffectInfo.EF_ELIDABLE_CANNOT_RAISE or \ - extraeffect == EffectInfo.EF_ELIDABLE_OR_MEMORYERROR or \ - extraeffect == EffectInfo.EF_ELIDABLE_CAN_RAISE: - # Ignore the writes. Note that this ignores also writes with - # no corresponding reads (rarely the case, but possible). 
- result.write_descrs_fields = [] - result.write_descrs_arrays = [] - result.write_descrs_interiorfields = [] - else: - result.write_descrs_fields = write_descrs_fields - result.write_descrs_arrays = write_descrs_arrays - result.write_descrs_interiorfields = write_descrs_interiorfields + result.write_descrs_fields = write_descrs_fields + result.write_descrs_arrays = write_descrs_arrays + result.write_descrs_interiorfields = write_descrs_interiorfields result.extraeffect = extraeffect result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex @@ -196,11 +219,6 @@ return '' % (id(self), self.extraeffect, more) -def frozenset_or_none(x): - if x is None: - return None - return frozenset(x) - EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, None, None, EffectInfo.EF_RANDOM_EFFECTS, can_invalidate=True) @@ -287,6 +305,18 @@ else: assert 0 # + if extraeffect == EffectInfo.EF_LOOPINVARIANT or \ + extraeffect == EffectInfo.EF_ELIDABLE_CANNOT_RAISE or \ + extraeffect == EffectInfo.EF_ELIDABLE_OR_MEMORYERROR or \ + extraeffect == EffectInfo.EF_ELIDABLE_CAN_RAISE: + # Ignore the writes. Note that this ignores also writes with + # no corresponding reads (rarely the case, but possible). 
+ write_descrs_fields = [] + write_descrs_arrays = [] + write_descrs_interiorfields = [] + # + if not hasattr(cpu, '_descr_counter_cache'): + cpu._descr_counter_cache = DescrCounterCache() return EffectInfo(readonly_descrs_fields, readonly_descrs_arrays, readonly_descrs_interiorfields, @@ -297,7 +327,8 @@ oopspecindex, can_invalidate, call_release_gil_target, - extradescr) + extradescr, + cpu._descr_counter_cache) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -1,19 +1,23 @@ import pytest from rpython.jit.codewriter.effectinfo import (effectinfo_from_writeanalyze, - EffectInfo, VirtualizableAnalyzer) + EffectInfo, VirtualizableAnalyzer, expand_descr_list) from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.rclass import OBJECT from rpython.translator.translator import TranslationContext, graphof +class FakeDescr(tuple): + pass + + class FakeCPU(object): def fielddescrof(self, T, fieldname): - return ('fielddescr', T, fieldname) + return FakeDescr(('fielddescr', T, fieldname)) def arraydescrof(self, A): - return ('arraydescr', A) + return FakeDescr(('arraydescr', A)) def test_no_oopspec_duplicate(): @@ -28,83 +32,98 @@ def test_include_read_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a")]) - effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert list(effectinfo.readonly_descrs_fields) == [('fielddescr', S, "a")] - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + cpu = FakeCPU() + effectinfo = effectinfo_from_writeanalyze(effects, cpu) + assert (expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == + [('fielddescr', S, "a")]) + assert 
expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] + assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] def test_include_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("struct", lltype.Ptr(S), "a")]) - effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_arrays + cpu = FakeCPU() + effectinfo = effectinfo_from_writeanalyze(effects, cpu) + assert (expand_descr_list(cpu, effectinfo.write_descrs_fields) == + [('fielddescr', S, "a")]) + assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] + assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] def test_include_read_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A))]) - effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert list(effectinfo.readonly_descrs_arrays) == [('arraydescr', A)] - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + cpu = FakeCPU() + effectinfo = effectinfo_from_writeanalyze(effects, cpu) + assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] + assert (expand_descr_list(cpu, effectinfo.readonly_descrs_arrays) == + [('arraydescr', A)]) + assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] + assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] def test_include_write_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("array", lltype.Ptr(A))]) - effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] + cpu = FakeCPU() + effectinfo = effectinfo_from_writeanalyze(effects, cpu) + 
assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] + assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] + assert (expand_descr_list(cpu, effectinfo.write_descrs_arrays) == + [('arraydescr', A)]) def test_dont_include_read_and_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a"), ("struct", lltype.Ptr(S), "a")]) - effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] - assert not effectinfo.write_descrs_arrays + cpu = FakeCPU() + effectinfo = effectinfo_from_writeanalyze(effects, cpu) + assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] + assert (expand_descr_list(cpu, effectinfo.write_descrs_fields) == + [('fielddescr', S, "a")]) + assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] def test_dont_include_read_and_write_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A)), ("array", lltype.Ptr(A))]) - effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.readonly_descrs_arrays - assert not effectinfo.write_descrs_fields - assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] + cpu = FakeCPU() + effectinfo = effectinfo_from_writeanalyze(effects, cpu) + assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] + assert expand_descr_list(cpu, effectinfo.readonly_descrs_arrays) == [] + assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] + assert (expand_descr_list(cpu, effectinfo.write_descrs_arrays) == + [('arraydescr', A)]) def test_filter_out_typeptr(): effects = frozenset([("struct", lltype.Ptr(OBJECT), "typeptr")]) - effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not 
effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + cpu = FakeCPU() + effectinfo = effectinfo_from_writeanalyze(effects, cpu) + assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] + assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] + assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] def test_filter_out_array_of_void(): effects = frozenset([("array", lltype.Ptr(lltype.GcArray(lltype.Void)))]) - effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + cpu = FakeCPU() + effectinfo = effectinfo_from_writeanalyze(effects, cpu) + assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] + assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] + assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] def test_filter_out_struct_with_void(): effects = frozenset([("struct", lltype.Ptr(lltype.GcStruct("x", ("a", lltype.Void))), "a")]) - effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + cpu = FakeCPU() + effectinfo = effectinfo_from_writeanalyze(effects, cpu) + assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] + assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] + assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] class TestVirtualizableAnalyzer(object): diff --git a/rpython/tool/algo/bitstring.py b/rpython/tool/algo/bitstring.py --- a/rpython/tool/algo/bitstring.py +++ b/rpython/tool/algo/bitstring.py @@ -18,3 +18,6 @@ if byte_number >= len(bitstring): return False return (ord(bitstring[byte_number]) & (1 << (n & 7))) != 0 + +def num_bits(bitstring): + return len(bitstring) << 3 diff --git 
a/rpython/tool/algo/test/test_bitstring.py b/rpython/tool/algo/test/test_bitstring.py --- a/rpython/tool/algo/test/test_bitstring.py +++ b/rpython/tool/algo/test/test_bitstring.py @@ -18,3 +18,8 @@ def test_random(lst): bitstring = make_bitstring(lst) assert set([n for n in range(300) if bitcheck(bitstring, n)]) == set(lst) + +def test_num_bits(): + assert num_bits('') == 0 + assert num_bits('a') == 8 + assert num_bits('bcd') == 24 From pypy.commits at gmail.com Tue Apr 26 04:44:22 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 01:44:22 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: Revert the changes here. Must be done more lazily in order to figure Message-ID: <571f2a66.923f1c0a.28d2d.fffff831@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83894:bf37c9521f1e Date: 2016-04-26 09:34 +0200 http://bitbucket.org/pypy/pypy/changeset/bf37c9521f1e/ Log: Revert the changes here. Must be done more lazily in order to figure out which FieldDescrs are present in exactly the same CallDescrs. On PyPy there are 4000 FieldDescrs but they be grouped into 373 such "families". 
diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -2,31 +2,6 @@ from rpython.rtyper.rclass import OBJECT from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.translator.backendopt.graphanalyze import BoolGraphAnalyzer -from rpython.tool.algo import bitstring - - - -class DescrCounterCache(object): - cache = [] - - def make_descr_set(self, lst): - if lst is None: - return None - integer_list = [] - for descr in lst: - try: - x = descr._ei_index - except AttributeError: - x = descr._ei_index = len(self.cache) - self.cache.append(descr) - integer_list.append(x) - return bitstring.make_bitstring(integer_list) - -def expand_descr_list(cpu, bitstr): - # for debugging - return [cpu._descr_counter_cache.cache[i] - for i in range(bitstring.num_bits(bitstr)) - if bitstring.bitcheck(bitstr, i)] class EffectInfo(object): @@ -134,21 +109,13 @@ oopspecindex=OS_NONE, can_invalidate=False, call_release_gil_target=_NO_CALL_RELEASE_GIL_TARGET, - extradescrs=None, - descr_counter_cache=DescrCounterCache()): - make = descr_counter_cache.make_descr_set - readonly_descrs_fields = make(readonly_descrs_fields) - readonly_descrs_arrays = make(readonly_descrs_arrays) - readonly_descrs_interiorfields = make(readonly_descrs_interiorfields) - write_descrs_fields = make(write_descrs_fields) - write_descrs_arrays = make(write_descrs_arrays) - write_descrs_interiorfields = make(write_descrs_interiorfields) - key = (readonly_descrs_fields, - readonly_descrs_arrays, - readonly_descrs_interiorfields, - write_descrs_fields, - write_descrs_arrays, - write_descrs_interiorfields, + extradescrs=None): + key = (frozenset_or_none(readonly_descrs_fields), + frozenset_or_none(readonly_descrs_arrays), + frozenset_or_none(readonly_descrs_interiorfields), + frozenset_or_none(write_descrs_fields), + frozenset_or_none(write_descrs_arrays), + 
frozenset_or_none(write_descrs_interiorfields), extraeffect, oopspecindex, can_invalidate) @@ -175,9 +142,19 @@ result.readonly_descrs_fields = readonly_descrs_fields result.readonly_descrs_arrays = readonly_descrs_arrays result.readonly_descrs_interiorfields = readonly_descrs_interiorfields - result.write_descrs_fields = write_descrs_fields - result.write_descrs_arrays = write_descrs_arrays - result.write_descrs_interiorfields = write_descrs_interiorfields + if extraeffect == EffectInfo.EF_LOOPINVARIANT or \ + extraeffect == EffectInfo.EF_ELIDABLE_CANNOT_RAISE or \ + extraeffect == EffectInfo.EF_ELIDABLE_OR_MEMORYERROR or \ + extraeffect == EffectInfo.EF_ELIDABLE_CAN_RAISE: + # Ignore the writes. Note that this ignores also writes with + # no corresponding reads (rarely the case, but possible). + result.write_descrs_fields = [] + result.write_descrs_arrays = [] + result.write_descrs_interiorfields = [] + else: + result.write_descrs_fields = write_descrs_fields + result.write_descrs_arrays = write_descrs_arrays + result.write_descrs_interiorfields = write_descrs_interiorfields result.extraeffect = extraeffect result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex @@ -219,6 +196,11 @@ return '' % (id(self), self.extraeffect, more) +def frozenset_or_none(x): + if x is None: + return None + return frozenset(x) + EffectInfo.MOST_GENERAL = EffectInfo(None, None, None, None, None, None, EffectInfo.EF_RANDOM_EFFECTS, can_invalidate=True) @@ -305,18 +287,6 @@ else: assert 0 # - if extraeffect == EffectInfo.EF_LOOPINVARIANT or \ - extraeffect == EffectInfo.EF_ELIDABLE_CANNOT_RAISE or \ - extraeffect == EffectInfo.EF_ELIDABLE_OR_MEMORYERROR or \ - extraeffect == EffectInfo.EF_ELIDABLE_CAN_RAISE: - # Ignore the writes. Note that this ignores also writes with - # no corresponding reads (rarely the case, but possible). 
- write_descrs_fields = [] - write_descrs_arrays = [] - write_descrs_interiorfields = [] - # - if not hasattr(cpu, '_descr_counter_cache'): - cpu._descr_counter_cache = DescrCounterCache() return EffectInfo(readonly_descrs_fields, readonly_descrs_arrays, readonly_descrs_interiorfields, @@ -327,8 +297,7 @@ oopspecindex, can_invalidate, call_release_gil_target, - extradescr, - cpu._descr_counter_cache) + extradescr) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -1,23 +1,19 @@ import pytest from rpython.jit.codewriter.effectinfo import (effectinfo_from_writeanalyze, - EffectInfo, VirtualizableAnalyzer, expand_descr_list) + EffectInfo, VirtualizableAnalyzer) from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.rclass import OBJECT from rpython.translator.translator import TranslationContext, graphof -class FakeDescr(tuple): - pass - - class FakeCPU(object): def fielddescrof(self, T, fieldname): - return FakeDescr(('fielddescr', T, fieldname)) + return ('fielddescr', T, fieldname) def arraydescrof(self, A): - return FakeDescr(('arraydescr', A)) + return ('arraydescr', A) def test_no_oopspec_duplicate(): @@ -32,98 +28,83 @@ def test_include_read_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a")]) - cpu = FakeCPU() - effectinfo = effectinfo_from_writeanalyze(effects, cpu) - assert (expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == - [('fielddescr', S, "a")]) - assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] - assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] + effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) + assert list(effectinfo.readonly_descrs_fields) 
== [('fielddescr', S, "a")] + assert not effectinfo.write_descrs_fields + assert not effectinfo.write_descrs_arrays def test_include_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("struct", lltype.Ptr(S), "a")]) - cpu = FakeCPU() - effectinfo = effectinfo_from_writeanalyze(effects, cpu) - assert (expand_descr_list(cpu, effectinfo.write_descrs_fields) == - [('fielddescr', S, "a")]) - assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] - assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] + effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) + assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] + assert not effectinfo.readonly_descrs_fields + assert not effectinfo.write_descrs_arrays def test_include_read_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A))]) - cpu = FakeCPU() - effectinfo = effectinfo_from_writeanalyze(effects, cpu) - assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] - assert (expand_descr_list(cpu, effectinfo.readonly_descrs_arrays) == - [('arraydescr', A)]) - assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] - assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] + effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) + assert not effectinfo.readonly_descrs_fields + assert list(effectinfo.readonly_descrs_arrays) == [('arraydescr', A)] + assert not effectinfo.write_descrs_fields + assert not effectinfo.write_descrs_arrays def test_include_write_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("array", lltype.Ptr(A))]) - cpu = FakeCPU() - effectinfo = effectinfo_from_writeanalyze(effects, cpu) - assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] - assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] - assert (expand_descr_list(cpu, effectinfo.write_descrs_arrays) == - [('arraydescr', A)]) + 
effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) + assert not effectinfo.readonly_descrs_fields + assert not effectinfo.write_descrs_fields + assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] def test_dont_include_read_and_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a"), ("struct", lltype.Ptr(S), "a")]) - cpu = FakeCPU() - effectinfo = effectinfo_from_writeanalyze(effects, cpu) - assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] - assert (expand_descr_list(cpu, effectinfo.write_descrs_fields) == - [('fielddescr', S, "a")]) - assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] + effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) + assert not effectinfo.readonly_descrs_fields + assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] + assert not effectinfo.write_descrs_arrays def test_dont_include_read_and_write_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A)), ("array", lltype.Ptr(A))]) - cpu = FakeCPU() - effectinfo = effectinfo_from_writeanalyze(effects, cpu) - assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] - assert expand_descr_list(cpu, effectinfo.readonly_descrs_arrays) == [] - assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] - assert (expand_descr_list(cpu, effectinfo.write_descrs_arrays) == - [('arraydescr', A)]) + effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) + assert not effectinfo.readonly_descrs_fields + assert not effectinfo.readonly_descrs_arrays + assert not effectinfo.write_descrs_fields + assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] def test_filter_out_typeptr(): effects = frozenset([("struct", lltype.Ptr(OBJECT), "typeptr")]) - cpu = FakeCPU() - effectinfo = effectinfo_from_writeanalyze(effects, cpu) - assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) 
== [] - assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] - assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] + effectinfo = effectinfo_from_writeanalyze(effects, None) + assert not effectinfo.readonly_descrs_fields + assert not effectinfo.write_descrs_fields + assert not effectinfo.write_descrs_arrays def test_filter_out_array_of_void(): effects = frozenset([("array", lltype.Ptr(lltype.GcArray(lltype.Void)))]) - cpu = FakeCPU() - effectinfo = effectinfo_from_writeanalyze(effects, cpu) - assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] - assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] - assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] + effectinfo = effectinfo_from_writeanalyze(effects, None) + assert not effectinfo.readonly_descrs_fields + assert not effectinfo.write_descrs_fields + assert not effectinfo.write_descrs_arrays def test_filter_out_struct_with_void(): effects = frozenset([("struct", lltype.Ptr(lltype.GcStruct("x", ("a", lltype.Void))), "a")]) - cpu = FakeCPU() - effectinfo = effectinfo_from_writeanalyze(effects, cpu) - assert expand_descr_list(cpu, effectinfo.readonly_descrs_fields) == [] - assert expand_descr_list(cpu, effectinfo.write_descrs_fields) == [] - assert expand_descr_list(cpu, effectinfo.write_descrs_arrays) == [] + effectinfo = effectinfo_from_writeanalyze(effects, None) + assert not effectinfo.readonly_descrs_fields + assert not effectinfo.write_descrs_fields + assert not effectinfo.write_descrs_arrays class TestVirtualizableAnalyzer(object): From pypy.commits at gmail.com Tue Apr 26 04:44:23 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 01:44:23 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: Compute the bitstrings Message-ID: <571f2a67.a60ac20a.48b0a.3d73@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83895:ce207cea6d2c Date: 2016-04-26 10:43 +0200 
http://bitbucket.org/pypy/pypy/changeset/ce207cea6d2c/ Log: Compute the bitstrings diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -1,7 +1,9 @@ +import sys from rpython.jit.metainterp.typesystem import deref, fieldType, arrayItem from rpython.rtyper.rclass import OBJECT from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.translator.backendopt.graphanalyze import BoolGraphAnalyzer +from rpython.tool.algo import bitstring class EffectInfo(object): @@ -110,12 +112,20 @@ can_invalidate=False, call_release_gil_target=_NO_CALL_RELEASE_GIL_TARGET, extradescrs=None): - key = (frozenset_or_none(readonly_descrs_fields), - frozenset_or_none(readonly_descrs_arrays), - frozenset_or_none(readonly_descrs_interiorfields), - frozenset_or_none(write_descrs_fields), - frozenset_or_none(write_descrs_arrays), - frozenset_or_none(write_descrs_interiorfields), + readonly_descrs_fields = frozenset_or_none(readonly_descrs_fields) + readonly_descrs_arrays = frozenset_or_none(readonly_descrs_arrays) + readonly_descrs_interiorfields = frozenset_or_none( + readonly_descrs_interiorfields) + write_descrs_fields = frozenset_or_none(write_descrs_fields) + write_descrs_arrays = frozenset_or_none(write_descrs_arrays) + write_descrs_interiorfields = frozenset_or_none( + write_descrs_interiorfields) + key = (readonly_descrs_fields, + readonly_descrs_arrays, + readonly_descrs_interiorfields, + write_descrs_fields, + write_descrs_arrays, + write_descrs_interiorfields, extraeffect, oopspecindex, can_invalidate) @@ -139,22 +149,24 @@ assert write_descrs_arrays is not None assert write_descrs_interiorfields is not None result = object.__new__(cls) - result.readonly_descrs_fields = readonly_descrs_fields - result.readonly_descrs_arrays = readonly_descrs_arrays - result.readonly_descrs_interiorfields = readonly_descrs_interiorfields + # the frozensets 
"._readonly_xxx" and "._write_xxx" should not be + # translated. + result._readonly_descrs_fields = readonly_descrs_fields + result._readonly_descrs_arrays = readonly_descrs_arrays + result._readonly_descrs_interiorfields = readonly_descrs_interiorfields if extraeffect == EffectInfo.EF_LOOPINVARIANT or \ extraeffect == EffectInfo.EF_ELIDABLE_CANNOT_RAISE or \ extraeffect == EffectInfo.EF_ELIDABLE_OR_MEMORYERROR or \ extraeffect == EffectInfo.EF_ELIDABLE_CAN_RAISE: # Ignore the writes. Note that this ignores also writes with # no corresponding reads (rarely the case, but possible). - result.write_descrs_fields = [] - result.write_descrs_arrays = [] - result.write_descrs_interiorfields = [] + result._write_descrs_fields = frozenset() + result._write_descrs_arrays = frozenset() + result._write_descrs_interiorfields = frozenset() else: - result.write_descrs_fields = write_descrs_fields - result.write_descrs_arrays = write_descrs_arrays - result.write_descrs_interiorfields = write_descrs_interiorfields + result._write_descrs_fields = write_descrs_fields + result._write_descrs_arrays = write_descrs_arrays + result._write_descrs_interiorfields = write_descrs_interiorfields result.extraeffect = extraeffect result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex @@ -165,6 +177,25 @@ cls._cache[key] = result return result + def check_readonly_descr_field(self, fielddescr): + return bitstring.bitcheck(self.bitstring_readonly_descrs_fields, + fielddescr.ei_index) + def check_write_descr_field(self, fielddescr): + return bitstring.bitcheck(self.bitstring_write_descrs_fields, + fielddescr.ei_index) + def check_readonly_descr_array(self, arraydescr): + return bitstring.bitcheck(self.bitstring_readonly_descrs_arrays, + arraydescr.ei_index) + def check_write_descr_array(self, arraydescr): + return bitstring.bitcheck(self.bitstring_write_descrs_arrays, + arraydescr.ei_index) + def check_readonly_descr_interiorfield(self, interiorfielddescr): + return 
bitstring.bitcheck(self.bitstring_readonly_descrs_interiorfields, + interiorfielddescr.ei_index) + def check_write_descr_interiorfield(self, interiorfielddescr): + return bitstring.bitcheck(self.bitstring_write_descrs_interiorfields, + interiorfielddescr.ei_index) + def check_can_raise(self, ignore_memoryerror=False): if ignore_memoryerror: return self.extraeffect > self.EF_ELIDABLE_OR_MEMORYERROR @@ -382,3 +413,55 @@ assert funcptr return funcptr funcptr_for_oopspec._annspecialcase_ = 'specialize:arg(1)' + +# ____________________________________________________________ + +def compute_bitstrings(all_descrs): + # Compute the bitstrings in the EffectInfo, + # bitstring_{readonly,write}_descrs_{fields,arrays,interiorfields}, + # and for each FieldDescr and ArrayDescr compute 'ei_index'. + # Each bit in the bitstrings says whether this Descr is present in + # this EffectInfo or not. We try to share the value of 'ei_index' + # across multiple Descrs if they always give the same answer (in + # PyPy, it reduces the length of the bitstrings from 4000+ to + # 373). 
+ effectinfos = [] + descrs = {'fields': set(), 'arrays': set(), 'interiorfields': set()} + for descr in all_descrs: + if hasattr(descr, 'get_extra_info'): + ei = descr.get_extra_info() + if ei._readonly_descrs_fields is None: + for key in descrs: + assert getattr(ei, '_readonly_descrs_' + key) is None + assert getattr(ei, '_write_descrs_' + key) is None + setattr(ei, 'bitstring_readonly_descrs_' + key, None) + setattr(ei, 'bitstring_write_descrs_' + key, None) + else: + effectinfos.append(ei) + for key in descrs: + descrs[key].update(getattr(ei, '_readonly_descrs_' + key)) + descrs[key].update(getattr(ei, '_write_descrs_' + key)) + else: + descr.ei_index = sys.maxint + for key in descrs: + mapping = {} + for descr in descrs[key]: + assert descr.ei_index == sys.maxint # not modified yet + eisetr = [ei for ei in effectinfos + if descr in getattr(ei, '_readonly_descrs_' + key)] + eisetw = [ei for ei in effectinfos + if descr in getattr(ei, '_write_descrs_' + key)] + eisetr = frozenset(eisetr) + eisetw = frozenset(eisetw) + descr.ei_index = mapping.setdefault((eisetr, eisetw), len(mapping)) + for ei in effectinfos: + bitstrr = [descr.ei_index + for descr in getattr(ei, '_readonly_descrs_' + key)] + bitstrw = [descr.ei_index + for descr in getattr(ei, '_write_descrs_' + key)] + assert sys.maxint not in bitstrr + assert sys.maxint not in bitstrw + bitstrr = bitstring.make_bitstring(bitstrr) + bitstrw = bitstring.make_bitstring(bitstrw) + setattr(ei, 'bitstring_readonly_descrs_' + key, bitstrr) + setattr(ei, 'bitstring_write_descrs_' + key, bitstrw) diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -1,11 +1,12 @@ -import pytest +import pytest, sys from rpython.jit.codewriter.effectinfo import (effectinfo_from_writeanalyze, - EffectInfo, VirtualizableAnalyzer) + EffectInfo, VirtualizableAnalyzer, 
compute_bitstrings) from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.rclass import OBJECT from rpython.translator.translator import TranslationContext, graphof +from rpython.tool.algo.bitstring import bitcheck class FakeCPU(object): @@ -29,37 +30,37 @@ S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert list(effectinfo.readonly_descrs_fields) == [('fielddescr', S, "a")] - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert list(effectinfo._readonly_descrs_fields) == [('fielddescr', S, "a")] + assert not effectinfo._write_descrs_fields + assert not effectinfo._write_descrs_arrays def test_include_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("struct", lltype.Ptr(S), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_arrays + assert list(effectinfo._write_descrs_fields) == [('fielddescr', S, "a")] + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_arrays def test_include_read_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A))]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert list(effectinfo.readonly_descrs_arrays) == [('arraydescr', A)] - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert list(effectinfo._readonly_descrs_arrays) == [('arraydescr', A)] + assert not effectinfo._write_descrs_fields + assert not effectinfo._write_descrs_arrays def test_include_write_array(): A = lltype.GcArray(lltype.Signed) effects 
= frozenset([("array", lltype.Ptr(A))]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_fields + assert list(effectinfo._write_descrs_arrays) == [('arraydescr', A)] def test_dont_include_read_and_write_field(): @@ -67,9 +68,9 @@ effects = frozenset([("readstruct", lltype.Ptr(S), "a"), ("struct", lltype.Ptr(S), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert list(effectinfo._write_descrs_fields) == [('fielddescr', S, "a")] + assert not effectinfo._write_descrs_arrays def test_dont_include_read_and_write_array(): @@ -77,34 +78,34 @@ effects = frozenset([("readarray", lltype.Ptr(A)), ("array", lltype.Ptr(A))]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.readonly_descrs_arrays - assert not effectinfo.write_descrs_fields - assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._readonly_descrs_arrays + assert not effectinfo._write_descrs_fields + assert list(effectinfo._write_descrs_arrays) == [('arraydescr', A)] def test_filter_out_typeptr(): effects = frozenset([("struct", lltype.Ptr(OBJECT), "typeptr")]) effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_fields + assert not 
effectinfo._write_descrs_arrays def test_filter_out_array_of_void(): effects = frozenset([("array", lltype.Ptr(lltype.GcArray(lltype.Void)))]) effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_fields + assert not effectinfo._write_descrs_arrays def test_filter_out_struct_with_void(): effects = frozenset([("struct", lltype.Ptr(lltype.GcStruct("x", ("a", lltype.Void))), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_fields + assert not effectinfo._write_descrs_arrays class TestVirtualizableAnalyzer(object): @@ -138,3 +139,64 @@ res = self.analyze(entry, [int]) assert not res + + +def test_compute_bitstrings(): + class FDescr: + pass + class ADescr: + pass + class CDescr: + def __init__(self, ei): + self._ei = ei + def get_extra_info(self): + return self._ei + + f1descr = FDescr() + f2descr = FDescr() + f3descr = FDescr() + a1descr = ADescr() + a2descr = ADescr() + + ei1 = EffectInfo(None, None, None, None, None, None, + EffectInfo.EF_RANDOM_EFFECTS) + ei2 = EffectInfo([f1descr], [], [], [], [], []) + ei3 = EffectInfo([f1descr], [a1descr, a2descr], [], [f2descr], [], []) + + compute_bitstrings([CDescr(ei1), CDescr(ei2), CDescr(ei3), + f1descr, f2descr, f3descr, a1descr, a2descr]) + + assert f1descr.ei_index in (0, 1) + assert f2descr.ei_index == 1 - f1descr.ei_index + assert f3descr.ei_index == sys.maxint + assert a1descr.ei_index == 0 + assert a2descr.ei_index == 0 + + assert ei1.bitstring_readonly_descrs_fields is None + assert ei1.bitstring_readonly_descrs_arrays is None + assert 
ei1.bitstring_write_descrs_fields is None + + def expand(bitstr): + return [n for n in range(10) if bitcheck(bitstr, n)] + + assert expand(ei2.bitstring_readonly_descrs_fields) == [f1descr.ei_index] + assert expand(ei2.bitstring_write_descrs_fields) == [] + assert expand(ei2.bitstring_readonly_descrs_arrays) == [] + assert expand(ei2.bitstring_write_descrs_arrays) == [] + + assert expand(ei3.bitstring_readonly_descrs_fields) == [f1descr.ei_index] + assert expand(ei3.bitstring_write_descrs_fields) == [f2descr.ei_index] + assert expand(ei3.bitstring_readonly_descrs_arrays) == [0] #a1descr,a2descr + assert expand(ei3.bitstring_write_descrs_arrays) == [] + + for ei in [ei2, ei3]: + for fdescr in [f1descr, f2descr]: + assert ei.check_readonly_descr_field(fdescr) == ( + fdescr in ei._readonly_descrs_fields) + assert ei.check_write_descr_field(fdescr) == ( + fdescr in ei._write_descrs_fields) + for adescr in [a1descr, a2descr]: + assert ei.check_readonly_descr_array(adescr) == ( + adescr in ei._readonly_descrs_arrays) + assert ei.check_write_descr_array(adescr) == ( + adescr in ei._write_descrs_arrays) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1809,6 +1809,7 @@ self._addr2name_values = [value for key, value in list_of_addr2name] def finish_setup(self, codewriter, optimizer=None): + from rpython.jit.codewriter import effectinfo from rpython.jit.metainterp.blackhole import BlackholeInterpBuilder self.blackholeinterpbuilder = BlackholeInterpBuilder(codewriter, self) # @@ -1839,6 +1840,7 @@ # self.globaldata = MetaInterpGlobalData(self) self.all_descrs = self.cpu.setup_descrs() + effectinfo.compute_bitstrings(self.all_descrs) def _setup_once(self): """Runtime setup needed by the various components of the JIT.""" From pypy.commits at gmail.com Tue Apr 26 04:51:58 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 26 Apr 2016 01:51:58 -0700 (PDT) 
Subject: [pypy-commit] pypy remove-objspace-options: document branch Message-ID: <571f2c2e.55301c0a.b9efb.ffffa52c@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83896:c53eff8521b5 Date: 2016-04-26 11:29 +0300 http://bitbucket.org/pypy/pypy/changeset/c53eff8521b5/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,3 +5,7 @@ .. this is a revision shortly after release-5.1 .. startrev: aa60332382a1 +.. branch: remove-objspace-options + +Remove a number of options from the build process that were never tested and +never set. Fix a performance bug in the method cache. From pypy.commits at gmail.com Tue Apr 26 04:52:00 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 26 Apr 2016 01:52:00 -0700 (PDT) Subject: [pypy-commit] pypy remove-objspace-options: close to-be-merged branch Message-ID: <571f2c30.82bb1c0a.e7ed0.ffffa3ec@mx.google.com> Author: Carl Friedrich Bolz Branch: remove-objspace-options Changeset: r83897:c4390e1b7a65 Date: 2016-04-26 11:47 +0300 http://bitbucket.org/pypy/pypy/changeset/c4390e1b7a65/ Log: close to-be-merged branch From pypy.commits at gmail.com Tue Apr 26 04:52:02 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 26 Apr 2016 01:52:02 -0700 (PDT) Subject: [pypy-commit] pypy default: merge remove-objspace-options Message-ID: <571f2c32.d81a1c0a.16653.ffffa589@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r83898:f05e62a3790d Date: 2016-04-26 11:50 +0300 http://bitbucket.org/pypy/pypy/changeset/f05e62a3790d/ Log: merge remove-objspace-options remove a number of pypy configuration options that essentially always had the same value. This also makes mapdict be used more in subclassing of (non-object) interp-level classes. 
It also fixes a bug in the method cache that slowed the interpreter down significantly (by about 10-20%) diff too long, truncating to 2000 out of 2520 lines diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -204,15 +204,6 @@ BoolOption("withstrbuf", "use strings optimized for addition (ver 2)", default=False), - BoolOption("withprebuiltchar", - "use prebuilt single-character string objects", - default=False), - - BoolOption("sharesmallstr", - "always reuse the prebuilt string objects " - "(the empty string and potentially single-char strings)", - default=False), - BoolOption("withspecialisedtuple", "use specialised tuples", default=False), @@ -222,39 +213,14 @@ default=False, requires=[("objspace.honor__builtins__", False)]), - BoolOption("withmapdict", - "make instances really small but slow without the JIT", - default=False, - requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withtypeversion", True), - ]), - - BoolOption("withrangelist", - "enable special range list implementation that does not " - "actually create the full list until the resulting " - "list is mutated", - default=False), BoolOption("withliststrategies", "enable optimized ways to store lists of primitives ", default=True), - BoolOption("withtypeversion", - "version type objects when changing them", - cmdline=None, - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), - - BoolOption("withmethodcache", - "try to cache method lookups", - default=False, - requires=[("objspace.std.withtypeversion", True), - ("translation.rweakref", True)]), BoolOption("withmethodcachecounter", "try to cache methods and provide a counter in __pypy__. 
" "for testing purposes only.", - default=False, - requires=[("objspace.std.withmethodcache", True)]), + default=False), IntOption("methodcachesizeexp", " 2 ** methodcachesizeexp is the size of the of the method cache ", default=11), @@ -265,22 +231,10 @@ BoolOption("optimized_list_getitem", "special case the 'list[integer]' expressions", default=False), - BoolOption("getattributeshortcut", - "track types that override __getattribute__", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), BoolOption("newshortcut", "cache and shortcut calling __new__ from builtin types", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), + default=False), - BoolOption("withidentitydict", - "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), ]), ]) @@ -296,15 +250,10 @@ """ # all the good optimizations for PyPy should be listed here if level in ['2', '3', 'jit']: - config.objspace.std.suggest(withrangelist=True) - config.objspace.std.suggest(withmethodcache=True) - config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(intshortcut=True) config.objspace.std.suggest(optimized_list_getitem=True) - config.objspace.std.suggest(getattributeshortcut=True) #config.objspace.std.suggest(newshortcut=True) config.objspace.std.suggest(withspecialisedtuple=True) - config.objspace.std.suggest(withidentitydict=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) @@ -317,16 +266,13 @@ # memory-saving optimizations if level == 'mem': config.objspace.std.suggest(withprebuiltint=True) - config.objspace.std.suggest(withrangelist=True) - config.objspace.std.suggest(withprebuiltchar=True) - config.objspace.std.suggest(withmapdict=True) + 
config.objspace.std.suggest(withliststrategies=True) if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) - config.objspace.std.suggest(withmapdict=True) def enable_allworkingmodules(config): diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -11,12 +11,6 @@ assert conf.objspace.usemodules.gc - conf.objspace.std.withmapdict = True - assert conf.objspace.std.withtypeversion - conf = get_pypy_config() - conf.objspace.std.withtypeversion = False - py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") - def test_conflicting_gcrootfinder(): conf = get_pypy_config() conf.translation.gc = "boehm" @@ -47,18 +41,10 @@ def test_set_pypy_opt_level(): conf = get_pypy_config() set_pypy_opt_level(conf, '2') - assert conf.objspace.std.getattributeshortcut + assert conf.objspace.std.intshortcut conf = get_pypy_config() set_pypy_opt_level(conf, '0') - assert not conf.objspace.std.getattributeshortcut - -def test_rweakref_required(): - conf = get_pypy_config() - conf.translation.rweakref = False - set_pypy_opt_level(conf, '3') - - assert not conf.objspace.std.withtypeversion - assert not conf.objspace.std.withmethodcache + assert not conf.objspace.std.intshortcut def test_check_documentation(): def check_file_exists(fn): diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.getattributeshortcut.txt +++ /dev/null @@ -1,1 +0,0 @@ -Performance only: track types that override __getattribute__. 
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt --- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt +++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt @@ -1,1 +1,1 @@ -Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`. +Set the cache size (number of entries) for the method cache. diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withidentitydict.txt +++ /dev/null @@ -1,21 +0,0 @@ -============================= -objspace.std.withidentitydict -============================= - -* **name:** withidentitydict - -* **description:** enable a dictionary strategy for "by identity" comparisons - -* **command-line:** --objspace-std-withidentitydict - -* **command-line for negation:** --no-objspace-std-withidentitydict - -* **option type:** boolean option - -* **default:** True - - -Enable a dictionary strategy specialized for instances of classes which -compares "by identity", which is the default unless you override ``__hash__``, -``__eq__`` or ``__cmp__``. This strategy will be used only with new-style -classes. diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmapdict.txt +++ /dev/null @@ -1,5 +0,0 @@ -Enable the new version of "sharing dictionaries". - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmethodcache.txt +++ /dev/null @@ -1,2 +0,0 @@ -Enable method caching. 
See the section "Method Caching" in `Standard -Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__. diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt --- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt +++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt @@ -1,1 +1,1 @@ -Testing/debug option for :config:`objspace.std.withmethodcache`. +Testing/debug option for the method cache. diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt deleted file mode 100644 diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withrangelist.txt +++ /dev/null @@ -1,11 +0,0 @@ -Enable "range list" objects. They are an additional implementation of the Python -``list`` type, indistinguishable for the normal user. Whenever the ``range`` -builtin is called, an range list is returned. As long as this list is not -mutated (and for example only iterated over), it uses only enough memory to -store the start, stop and step of the range. This makes using ``range`` as -efficient as ``xrange``, as long as the result is only used in a ``for``-loop. - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists - diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withtypeversion.txt +++ /dev/null @@ -1,6 +0,0 @@ -This (mostly internal) option enables "type versions": Every type object gets an -(only internally visible) version that is updated when the type's dict is -changed. This is e.g. used for invalidating caches. It does not make sense to -enable this option alone. - -.. 
internal diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -62,29 +62,37 @@ Dictionary Optimizations ~~~~~~~~~~~~~~~~~~~~~~~~ -Multi-Dicts -+++++++++++ +Dict Strategies +++++++++++++++++ -Multi-dicts are a special implementation of dictionaries. It became clear that -it is very useful to *change* the internal representation of an object during -its lifetime. Multi-dicts are a general way to do that for dictionaries: they -provide generic support for the switching of internal representations for -dicts. +Dict strategies are an implementation approach for dictionaries (and lists) +that make it possible to use a specialized representation of the dictionary's +data, while still being able to switch back to a general representation should +that become necessary later. -If you just enable multi-dicts, special representations for empty dictionaries, -for string-keyed dictionaries. In addition there are more specialized dictionary -implementations for various purposes (see below). +Dict strategies are always enabled, by default there are special strategies for +dicts with just string keys, just unicode keys and just integer keys. If one of +those specialized strategies is used, then dict lookup can use much faster +hashing and comparison for the dict keys. There is of course also a strategy +for general keys. -This is now the default implementation of dictionaries in the Python interpreter. +Identity Dicts ++++++++++++++++ -Sharing Dicts +We also have a strategy specialized for keys that are instances of classes +which compares "by identity", which is the default unless you override +``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with +new-style classes. + + +Map Dicts +++++++++++++ -Sharing dictionaries are a special representation used together with multidicts. 
-This dict representation is used only for instance dictionaries and tries to -make instance dictionaries use less memory (in fact, in the ideal case the -memory behaviour should be mostly like that of using __slots__). +Map dictionaries are a special representation used together with dict strategies. +This dict strategy is used only for instance dictionaries and tries to +make instance dictionaries use less memory (in fact, usually memory behaviour +should be mostly like that of using ``__slots__``). The idea is the following: Most instances of the same class have very similar attributes, and are even adding these keys to the dictionary in the same order @@ -95,8 +103,6 @@ dicts: the representation of the instance dict contains only a list of values. -A more advanced version of sharing dicts, called *map dicts,* is available -with the :config:`objspace.std.withmapdict` option. List Optimizations @@ -114,8 +120,8 @@ created. This gives the memory and speed behaviour of ``xrange`` and the generality of use of ``range``, and makes ``xrange`` essentially useless. -You can enable this feature with the :config:`objspace.std.withrangelist` -option. +This feature is enabled by default as part of the +:config:`objspace.std.withliststrategies` option. User Class Optimizations @@ -133,8 +139,7 @@ base classes is changed). On subsequent lookups the cached version can be used, as long as the instance did not shadow any of its classes attributes. -You can enable this feature with the :config:`objspace.std.withmethodcache` -option. +This feature is enabled by default. Interpreter Optimizations diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -10,3 +10,9 @@ .. branch: gcheader-decl Reduce the size of generated C sources. + + +.. branch: remove-objspace-options + +Remove a number of options from the build process that were never tested and +never set. Fix a performance bug in the method cache. 
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -114,6 +114,7 @@ e.write_unraisable(self.space, "new_code_hook()") def _initialize(self): + from pypy.objspace.std.mapdict import init_mapdict_cache if self.co_cellvars: argcount = self.co_argcount assert argcount >= 0 # annotator hint @@ -149,9 +150,7 @@ self._compute_flatcall() - if self.space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import init_mapdict_cache - init_mapdict_cache(self) + init_mapdict_cache(self) def _init_ready(self): "This is a hook for the vmprof module, which overrides this method." diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -951,8 +951,7 @@ def LOAD_ATTR(self, nameindex, next_instr): "obj.attributename" w_obj = self.popvalue() - if (self.space.config.objspace.std.withmapdict - and not jit.we_are_jitted()): + if not jit.we_are_jitted(): from pypy.objspace.std.mapdict import LOAD_ATTR_caching w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex) else: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -98,175 +98,51 @@ # reason is that it is missing a place to store the __dict__, the slots, # the weakref lifeline, and it typically has no interp-level __del__. # So we create a few interp-level subclasses of W_XxxObject, which add -# some combination of features. -# -# We don't build 2**4 == 16 subclasses for all combinations of requested -# features, but limit ourselves to 6, chosen a bit arbitrarily based on -# typical usage (case 1 is the most common kind of app-level subclasses; -# case 2 is the memory-saving kind defined with __slots__). -# -# +----------------------------------------------------------------+ -# | NOTE: if withmapdict is enabled, the following doesn't apply! 
| -# | Map dicts can flexibly allow any slots/__dict__/__weakref__ to | -# | show up only when needed. In particular there is no way with | -# | mapdict to prevent some objects from being weakrefable. | -# +----------------------------------------------------------------+ -# -# dict slots del weakrefable -# -# 1. Y N N Y UserDictWeakref -# 2. N Y N N UserSlots -# 3. Y Y N Y UserDictWeakrefSlots -# 4. N Y N Y UserSlotsWeakref -# 5. Y Y Y Y UserDictWeakrefSlotsDel -# 6. N Y Y Y UserSlotsWeakrefDel -# -# Note that if the app-level explicitly requests no dict, we should not -# provide one, otherwise storing random attributes on the app-level -# instance would unexpectedly work. We don't care too much, though, if -# an object is weakrefable when it shouldn't really be. It's important -# that it has a __del__ only if absolutely needed, as this kills the -# performance of the GCs. -# -# Interp-level inheritance is like this: -# -# W_XxxObject base -# / \ -# 1 2 -# / \ -# 3 4 -# / \ -# 5 6 +# some combination of features. This is done using mapdict. -def get_unique_interplevel_subclass(config, cls, hasdict, wants_slots, - needsdel=False, weakrefable=False): +# we need two subclasses of the app-level type, one to add mapdict, and then one +# to add del to not slow down the GC. 
+ +def get_unique_interplevel_subclass(config, cls, needsdel=False): "NOT_RPYTHON: initialization-time only" if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False): needsdel = False assert cls.typedef.acceptable_as_base_class - key = config, cls, hasdict, wants_slots, needsdel, weakrefable + key = config, cls, needsdel try: return _subclass_cache[key] except KeyError: - subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel, - weakrefable) + # XXX can save a class if cls already has a __del__ + if needsdel: + cls = get_unique_interplevel_subclass(config, cls, False) + subcls = _getusercls(config, cls, needsdel) assert key not in _subclass_cache _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable): +def _getusercls(config, cls, wants_del, reallywantdict=False): + from rpython.rlib import objectmodel + from pypy.objspace.std.mapdict import (BaseUserClassMapdict, + MapdictDictSupport, MapdictWeakrefSupport, + _make_storage_mixin_size_n) typedef = cls.typedef - if wants_dict and typedef.hasdict: - wants_dict = False - if config.objspace.std.withmapdict and not typedef.hasdict: - # mapdict only works if the type does not already have a dict - if wants_del: - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - return _usersubclswithfeature(config, cls, "user", "dict", "weakref", "slots") - # Forest of if's - see the comment above. 
+ name = cls.__name__ + "User" + + mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()] + if reallywantdict or not typedef.hasdict: + # the type has no dict, mapdict to provide the dict + mixins_needed.append(MapdictDictSupport) + name += "Dict" + if not typedef.weakrefable: + # the type does not support weakrefs yet, mapdict to provide weakref + # support + mixins_needed.append(MapdictWeakrefSupport) + name += "Weakrefable" if wants_del: - if wants_dict: - # case 5. Parent class is 3. - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - else: - # case 6. Parent class is 4. - parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - elif wants_dict: - if wants_slots: - # case 3. Parent class is 1. - parentcls = get_unique_interplevel_subclass(config, cls, True, False, - False, True) - return _usersubclswithfeature(config, parentcls, "slots") - else: - # case 1 (we need to add weakrefable unless it's already in 'cls') - if not typedef.weakrefable: - return _usersubclswithfeature(config, cls, "user", "dict", "weakref") - else: - return _usersubclswithfeature(config, cls, "user", "dict") - else: - if weakrefable and not typedef.weakrefable: - # case 4. Parent class is 2. 
- parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, False) - return _usersubclswithfeature(config, parentcls, "weakref") - else: - # case 2 (if the base is already weakrefable, case 2 == case 4) - return _usersubclswithfeature(config, cls, "user", "slots") - -def _usersubclswithfeature(config, parentcls, *features): - key = config, parentcls, features - try: - return _usersubclswithfeature_cache[key] - except KeyError: - subcls = _builduserclswithfeature(config, parentcls, *features) - _usersubclswithfeature_cache[key] = subcls - return subcls -_usersubclswithfeature_cache = {} -_allusersubcls_cache = {} - -def _builduserclswithfeature(config, supercls, *features): - "NOT_RPYTHON: initialization-time only" - name = supercls.__name__ - name += ''.join([name.capitalize() for name in features]) - body = {} - #print '..........', name, '(', supercls.__name__, ')' - - def add(Proto): - for key, value in Proto.__dict__.items(): - if (not key.startswith('__') and not key.startswith('_mixin_') - or key == '__del__'): - if hasattr(value, "func_name"): - value = func_with_new_name(value, value.func_name) - body[key] = value - - if (config.objspace.std.withmapdict and "dict" in features): - from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin - add(BaseMapdictObject) - add(ObjectMixin) - body["user_overridden_class"] = True - features = () - - if "user" in features: # generic feature needed by all subcls - - class Proto(object): - user_overridden_class = True - - def getclass(self, space): - return promote(self.w__class__) - - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - - def user_setup(self, space, w_subtype): - self.space = space - self.w__class__ = w_subtype - self.user_setup_slots(w_subtype.layout.nslots) - - def user_setup_slots(self, nslots): - assert nslots == 0 - add(Proto) - - if "weakref" in features: - class Proto(object): - _lifeline_ = None - def 
getweakref(self): - return self._lifeline_ - def setweakref(self, space, weakreflifeline): - self._lifeline_ = weakreflifeline - def delweakref(self): - self._lifeline_ = None - add(Proto) - - if "del" in features: - parent_destructor = getattr(supercls, '__del__', None) + name += "Del" + parent_destructor = getattr(cls, '__del__', None) def call_parent_del(self): assert isinstance(self, subcls) parent_destructor(self) @@ -281,57 +157,16 @@ if parent_destructor is not None: self.enqueue_for_destruction(self.space, call_parent_del, 'internal destructor of ') - add(Proto) + mixins_needed.append(Proto) - if "slots" in features: - class Proto(object): - slots_w = [] - def user_setup_slots(self, nslots): - if nslots > 0: - self.slots_w = [None] * nslots - def setslotvalue(self, index, w_value): - self.slots_w[index] = w_value - def delslotvalue(self, index): - if self.slots_w[index] is None: - return False - self.slots_w[index] = None - return True - def getslotvalue(self, index): - return self.slots_w[index] - add(Proto) - - if "dict" in features: - base_user_setup = supercls.user_setup.im_func - if "user_setup" in body: - base_user_setup = body["user_setup"] - class Proto(object): - def getdict(self, space): - return self.w__dict__ - - def setdict(self, space, w_dict): - self.w__dict__ = check_new_dictionary(space, w_dict) - - def user_setup(self, space, w_subtype): - self.w__dict__ = space.newdict( - instance=True) - base_user_setup(self, space, w_subtype) - - add(Proto) - - subcls = type(name, (supercls,), body) - _allusersubcls_cache[subcls] = True + class subcls(cls): + user_overridden_class = True + for base in mixins_needed: + objectmodel.import_from_mixin(base) + del subcls.base + subcls.__name__ = name return subcls -# a couple of helpers for the Proto classes above, factored out to reduce -# the translated code size -def check_new_dictionary(space, w_dict): - if not space.isinstance_w(w_dict, space.w_dict): - raise OperationError(space.w_TypeError, - 
space.wrap("setting dictionary to a non-dict")) - from pypy.objspace.std import dictmultiobject - assert isinstance(w_dict, dictmultiobject.W_DictMultiObject) - return w_dict -check_new_dictionary._dont_inline_ = True # ____________________________________________________________ diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -87,7 +87,7 @@ howmany = get_len_of_range(space, start, stop, step) - if space.config.objspace.std.withrangelist: + if space.config.objspace.std.withliststrategies: return range_withspecialized_implementation(space, start, step, howmany) res_w = [None] * howmany @@ -99,7 +99,7 @@ def range_withspecialized_implementation(space, start, step, length): - assert space.config.objspace.std.withrangelist + assert space.config.objspace.std.withliststrategies from pypy.objspace.std.listobject import make_range_list return make_range_list(space, start, step, length) diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -185,12 +185,19 @@ class Cache: def __init__(self, space): - from pypy.interpreter.typedef import _usersubclswithfeature - # evil - self.cls_without_del = _usersubclswithfeature( - space.config, W_InstanceObject, "dict", "weakref") - self.cls_with_del = _usersubclswithfeature( - space.config, self.cls_without_del, "del") + from pypy.interpreter.typedef import _getusercls + + if hasattr(space, 'is_fake_objspace'): + # hack: with the fake objspace, we don't want to see typedef's + # _getusercls() at all + self.cls_without_del = W_InstanceObject + self.cls_with_del = W_InstanceObject + return + + self.cls_without_del = _getusercls( + space.config, W_InstanceObject, False, reallywantdict=True) + self.cls_with_del = _getusercls( + space.config, W_InstanceObject, 
True, reallywantdict=True) def class_descr_call(space, w_self, __args__): diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -748,10 +748,6 @@ raises(TypeError, delattr, A(), 42) -class AppTestGetattrWithGetAttributeShortcut(AppTestGetattr): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - class TestInternal: def test_execfile(self, space): fn = str(udir.join('test_execfile')) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1118,8 +1118,7 @@ assert getattr(c, u"x") == 1 -class AppTestOldStyleMapDict(AppTestOldstyle): - spaceconfig = {"objspace.std.withmapdict": True} +class AppTestOldStyleMapDict: def setup_class(cls): if cls.runappdirect: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -110,9 +110,8 @@ 'interp_magic.method_cache_counter') self.extra_interpdef('reset_method_cache_counter', 'interp_magic.reset_method_cache_counter') - if self.space.config.objspace.std.withmapdict: - self.extra_interpdef('mapdict_cache_counter', - 'interp_magic.mapdict_cache_counter') + self.extra_interpdef('mapdict_cache_counter', + 'interp_magic.mapdict_cache_counter') PYC_MAGIC = get_pyc_magic(self.space) self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC) try: diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -37,17 +37,15 @@ cache = space.fromcache(MethodCache) cache.misses = {} cache.hits = {} - if space.config.objspace.std.withmapdict: - cache = space.fromcache(MapAttrCache) - cache.misses = 
{} - cache.hits = {} + cache = space.fromcache(MapAttrCache) + cache.misses = {} + cache.hits = {} @unwrap_spec(name=str) def mapdict_cache_counter(space, name): """Return a tuple (index_cache_hits, index_cache_misses) for lookups in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter - assert space.config.objspace.std.withmapdict cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -1,8 +1,7 @@ import py class AppTest(object): - spaceconfig = {"objspace.usemodules.select": False, - "objspace.std.withrangelist": True} + spaceconfig = {"objspace.usemodules.select": False} def setup_class(cls): if cls.runappdirect: diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,7 +106,6 @@ """Base class for all cpyext tests.""" spaceconfig = dict(usemodules=['cpyext', 'thread', '_rawffi', 'array', 'itertools', 'time', 'binascii', 'micronumpy']) - spaceconfig['std.withmethodcache'] = True enable_leak_checking = True diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -6,15 +6,14 @@ @unwrap_spec(generation=int) def collect(space, generation=0): "Run a full collection. The optional argument is ignored." - # First clear the method cache. See test_gc for an example of why. 
- if space.config.objspace.std.withmethodcache: - from pypy.objspace.std.typeobject import MethodCache - cache = space.fromcache(MethodCache) - cache.clear() - if space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import MapAttrCache - cache = space.fromcache(MapAttrCache) - cache.clear() + # First clear the method and the map cache. + # See test_gc for an example of why. + from pypy.objspace.std.typeobject import MethodCache + from pypy.objspace.std.mapdict import MapAttrCache + cache = space.fromcache(MethodCache) + cache.clear() + cache = space.fromcache(MapAttrCache) + cache.clear() rgc.collect() return space.wrap(0) diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py --- a/pypy/module/gc/test/test_gc.py +++ b/pypy/module/gc/test/test_gc.py @@ -106,7 +106,6 @@ class AppTestGcMethodCache(object): - spaceconfig = {"objspace.std.withmethodcache": True} def test_clear_method_cache(self): import gc, weakref @@ -127,10 +126,6 @@ assert r() is None -class AppTestGcMapDictIndexCache(AppTestGcMethodCache): - spaceconfig = {"objspace.std.withmethodcache": True, - "objspace.std.withmapdict": True} - def test_clear_index_cache(self): import gc, weakref rlist = [] diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -25,9 +25,9 @@ i61 = int_add(i58, 1) setfield_gc(p18, i61, descr=) guard_not_invalidated(descr=...) - p65 = getfield_gc_r(p14, descr=) + p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) - p66 = getfield_gc_r(p14, descr=) + p66 = getfield_gc_r(p14, descr=) guard_nonnull_class(p66, ..., descr=...) 
p67 = force_token() setfield_gc(p0, p67, descr=) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -120,6 +120,8 @@ 'bytearray', 'buffer', 'set', 'frozenset'] class FakeObjSpace(ObjSpace): + is_fake_objspace = True + def __init__(self, config=None): self._seen_extras = [] ObjSpace.__init__(self, config=config) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -841,34 +841,12 @@ return [s for s in value] W_BytesObject.EMPTY = W_BytesObject('') -W_BytesObject.PREBUILT = [W_BytesObject(chr(i)) for i in range(256)] -del i def wrapstr(space, s): - if space.config.objspace.std.sharesmallstr: - if space.config.objspace.std.withprebuiltchar: - # share characters and empty string - if len(s) <= 1: - if len(s) == 0: - return W_BytesObject.EMPTY - else: - s = s[0] # annotator hint: a single char - return wrapchar(space, s) - else: - # only share the empty string - if len(s) == 0: - return W_BytesObject.EMPTY return W_BytesObject(s) -def wrapchar(space, c): - if space.config.objspace.std.withprebuiltchar and not we_are_jitted(): - return W_BytesObject.PREBUILT[ord(c)] - else: - return W_BytesObject(c) - - W_BytesObject.typedef = TypeDef( "str", basestring_typedef, __new__ = interp2app(W_BytesObject.descr_new), diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -23,6 +23,7 @@ def LOOKUP_METHOD(f, nameindex, *ignored): + from pypy.objspace.std.typeobject import MutableCell # stack before after # -------------- --fast-method----fallback-case------------ # @@ -33,7 +34,7 @@ space = f.space w_obj = f.popvalue() - if space.config.objspace.std.withmapdict and not jit.we_are_jitted(): + if not jit.we_are_jitted(): # mapdict has an extra-fast version of this function 
if LOOKUP_METHOD_mapdict(f, nameindex, w_obj): return @@ -44,7 +45,18 @@ w_type = space.type(w_obj) if w_type.has_object_getattribute(): name = space.str_w(w_name) - w_descr = w_type.lookup(name) + # bit of a mess to use these internal functions, but it allows the + # mapdict caching below to work without an additional lookup + version_tag = w_type.version_tag() + if version_tag is None: + _, w_descr = w_type._lookup_where(name) + w_descr_cell = None + else: + _, w_descr_cell = w_type._pure_lookup_where_with_method_cache( + name, version_tag) + w_descr = w_descr_cell + if isinstance(w_descr, MutableCell): + w_descr = w_descr.unwrap_cell(space) if w_descr is None: # this handles directly the common case # module.function(args..) @@ -59,11 +71,11 @@ # nothing in the instance f.pushvalue(w_descr) f.pushvalue(w_obj) - if (space.config.objspace.std.withmapdict and - not jit.we_are_jitted()): + if not jit.we_are_jitted(): # let mapdict cache stuff LOOKUP_METHOD_mapdict_fill_cache_method( - space, f.getcode(), name, nameindex, w_obj, w_type) + space, f.getcode(), name, nameindex, w_obj, w_type, + w_descr_cell) return if w_value is None: w_value = space.getattr(w_obj, w_name) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -66,10 +66,10 @@ w_obj = space.allocate_instance(W_ModuleDictObject, space.w_dict) W_ModuleDictObject.__init__(w_obj, space, strategy, storage) return w_obj - elif space.config.objspace.std.withmapdict and instance: + elif instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) - elif instance or strdict or module: + elif strdict or module: assert w_type is None strategy = space.fromcache(BytesDictStrategy) elif kwargs: @@ -592,7 +592,6 @@ return self.erase(None) def switch_to_correct_strategy(self, w_dict, w_key): - withidentitydict = 
self.space.config.objspace.std.withidentitydict if type(w_key) is self.space.StringObjectCls: self.switch_to_bytes_strategy(w_dict) return @@ -602,7 +601,7 @@ w_type = self.space.type(w_key) if self.space.is_w(w_type, self.space.w_int): self.switch_to_int_strategy(w_dict) - elif withidentitydict and w_type.compares_by_identity(): + elif w_type.compares_by_identity(): self.switch_to_identity_strategy(w_dict) else: self.switch_to_object_strategy(w_dict) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -67,12 +67,7 @@ @jit.elidable def find_map_attr(self, name, index): - if (self.space.config.objspace.std.withmethodcache): - return self._find_map_attr_cache(name, index) - return self._find_map_attr(name, index) - - @jit.dont_look_inside - def _find_map_attr_cache(self, name, index): + # attr cache space = self.space cache = space.fromcache(MapAttrCache) SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp @@ -429,7 +424,6 @@ class MapAttrCache(object): def __init__(self, space): - assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.attrs = [None] * SIZE self.names = [None] * SIZE @@ -456,12 +450,19 @@ INVALID = 2 SLOTS_STARTING_FROM = 3 +# a little bit of a mess of mixin classes that implement various pieces of +# objspace user object functionality in terms of mapdict -class BaseMapdictObject: - _mixin_ = True +class BaseUserClassMapdict: + # everything that's needed to use mapdict for a user subclass at all. + # This immediately makes slots possible. 
- def _init_empty(self, map): - raise NotImplementedError("abstract base class") + # assumes presence of _init_empty, _mapdict_read_storage, + # _mapdict_write_storage, _mapdict_storage_length, + # _set_mapdict_storage_and_map + + # _____________________________________________ + # methods needed for mapdict def _become(self, new_obj): self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) @@ -470,49 +471,11 @@ return jit.promote(self.map) def _set_mapdict_map(self, map): self.map = map + # _____________________________________________ # objspace interface - def getdictvalue(self, space, attrname): - return self._get_mapdict_map().read(self, attrname, DICT) - - def setdictvalue(self, space, attrname, w_value): - return self._get_mapdict_map().write(self, attrname, DICT, w_value) - - def deldictvalue(self, space, attrname): - new_obj = self._get_mapdict_map().delete(self, attrname, DICT) - if new_obj is None: - return False - self._become(new_obj) - return True - - def getdict(self, space): - w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) - if w_dict is not None: - assert isinstance(w_dict, W_DictMultiObject) - return w_dict - - strategy = space.fromcache(MapDictStrategy) - storage = strategy.erase(self) - w_dict = W_DictObject(space, strategy, storage) - flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) - assert flag - return w_dict - - def setdict(self, space, w_dict): - from pypy.interpreter.typedef import check_new_dictionary - w_dict = check_new_dictionary(space, w_dict) - w_olddict = self.getdict(space) - assert isinstance(w_dict, W_DictMultiObject) - # The old dict has got 'self' as dstorage, but we are about to - # change self's ("dict", SPECIAL) attribute to point to the - # new dict. If the old dict was using the MapDictStrategy, we - # have to force it now: otherwise it would remain an empty - # shell that continues to delegate to 'self'. 
- if type(w_olddict.get_strategy()) is MapDictStrategy: - w_olddict.get_strategy().switch_to_object_strategy(w_olddict) - flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) - assert flag + # class access def getclass(self, space): return self._get_mapdict_map().terminator.w_cls @@ -525,9 +488,13 @@ from pypy.module.__builtin__.interp_classobj import W_InstanceObject self.space = space assert (not self.typedef.hasdict or + isinstance(w_subtype.terminator, NoDictTerminator) or self.typedef is W_InstanceObject.typedef) self._init_empty(w_subtype.terminator) + + # methods needed for slots + def getslotvalue(self, slotindex): index = SLOTS_STARTING_FROM + slotindex return self._get_mapdict_map().read(self, "slot", index) @@ -544,7 +511,9 @@ self._become(new_obj) return True - # used by _weakref implemenation + +class MapdictWeakrefSupport(object): + # stuff used by the _weakref implementation def getweakref(self): from pypy.module._weakref.interp__weakref import WeakrefLifeline @@ -565,8 +534,71 @@ self._get_mapdict_map().write(self, "weakref", SPECIAL, None) delweakref._cannot_really_call_random_things_ = True -class ObjectMixin(object): - _mixin_ = True + +class MapdictDictSupport(object): + + # objspace interface for dictionary operations + + def getdictvalue(self, space, attrname): + return self._get_mapdict_map().read(self, attrname, DICT) + + def setdictvalue(self, space, attrname, w_value): + return self._get_mapdict_map().write(self, attrname, DICT, w_value) + + def deldictvalue(self, space, attrname): + new_obj = self._get_mapdict_map().delete(self, attrname, DICT) + if new_obj is None: + return False + self._become(new_obj) + return True + + def getdict(self, space): + return _obj_getdict(self, space) + + def setdict(self, space, w_dict): + _obj_setdict(self, space, w_dict) + +# a couple of helpers for the classes above, factored out to reduce +# the translated code size + + at objectmodel.dont_inline +def _obj_getdict(self, space): + 
terminator = self._get_mapdict_map().terminator + assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator) + w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) + if w_dict is not None: + assert isinstance(w_dict, W_DictMultiObject) + return w_dict + + strategy = space.fromcache(MapDictStrategy) + storage = strategy.erase(self) + w_dict = W_DictObject(space, strategy, storage) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) + assert flag + return w_dict + + at objectmodel.dont_inline +def _obj_setdict(self, space, w_dict): + from pypy.interpreter.error import OperationError + terminator = self._get_mapdict_map().terminator + assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator) + if not space.isinstance_w(w_dict, space.w_dict): + raise OperationError(space.w_TypeError, + space.wrap("setting dictionary to a non-dict")) + assert isinstance(w_dict, W_DictMultiObject) + w_olddict = self.getdict(space) + assert isinstance(w_olddict, W_DictMultiObject) + # The old dict has got 'self' as dstorage, but we are about to + # change self's ("dict", SPECIAL) attribute to point to the + # new dict. If the old dict was using the MapDictStrategy, we + # have to force it now: otherwise it would remain an empty + # shell that continues to delegate to 'self'. 
+ if type(w_olddict.get_strategy()) is MapDictStrategy: + w_olddict.get_strategy().switch_to_object_strategy(w_olddict) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) + assert flag + +class MapdictStorageMixin(object): def _init_empty(self, map): from rpython.rlib.debug import make_sure_not_resized self.map = map @@ -585,51 +617,32 @@ self.storage = storage self.map = map -class Object(ObjectMixin, BaseMapdictObject, W_Root): - pass # mainly for tests +class ObjectWithoutDict(W_Root): + # mainly for tests + objectmodel.import_from_mixin(MapdictStorageMixin) -def get_subclass_of_correct_size(space, cls, w_type): - assert space.config.objspace.std.withmapdict - map = w_type.terminator - classes = memo_get_subclass_of_correct_size(space, cls) - if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS: - return classes[0] - size = map.size_estimate() - debug.check_nonneg(size) - if size < len(classes): - return classes[size] - else: - return classes[len(classes)-1] -get_subclass_of_correct_size._annspecialcase_ = "specialize:arg(1)" + objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictWeakrefSupport) -SUBCLASSES_MIN_FIELDS = 5 # XXX tweak these numbers -SUBCLASSES_MAX_FIELDS = 5 -def memo_get_subclass_of_correct_size(space, supercls): - key = space, supercls - try: - return _subclass_cache[key] - except KeyError: - assert not hasattr(supercls, "__del__") - result = [] - for i in range(SUBCLASSES_MIN_FIELDS, SUBCLASSES_MAX_FIELDS+1): - result.append(_make_subclass_size_n(supercls, i)) - for i in range(SUBCLASSES_MIN_FIELDS): - result.insert(0, result[0]) - if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS: - assert len(set(result)) == 1 - _subclass_cache[key] = result - return result -memo_get_subclass_of_correct_size._annspecialcase_ = "specialize:memo" -_subclass_cache = {} +class Object(W_Root): + # mainly for tests + objectmodel.import_from_mixin(MapdictStorageMixin) -def _make_subclass_size_n(supercls, n): + 
objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictWeakrefSupport) + objectmodel.import_from_mixin(MapdictDictSupport) + + +SUBCLASSES_NUM_FIELDS = 5 + +def _make_storage_mixin_size_n(n=SUBCLASSES_NUM_FIELDS): from rpython.rlib import unroll rangen = unroll.unrolling_iterable(range(n)) nmin1 = n - 1 rangenmin1 = unroll.unrolling_iterable(range(nmin1)) valnmin1 = "_value%s" % nmin1 - class subcls(BaseMapdictObject, supercls): + class subcls(object): def _init_empty(self, map): for i in rangenmin1: setattr(self, "_value%s" % i, None) @@ -697,7 +710,7 @@ erased = erase_list(storage_list) setattr(self, "_value%s" % nmin1, erased) - subcls.__name__ = supercls.__name__ + "Size%s" % n + subcls.__name__ = "Size%s" % n return subcls # ____________________________________________________________ @@ -964,7 +977,7 @@ name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is # a MutableCell, which may change without changing the version_tag - _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( + _, w_descr = w_type._pure_lookup_where_with_method_cache( name, version_tag) # attrname, index = ("", INVALID) @@ -1011,22 +1024,15 @@ return False def LOOKUP_METHOD_mapdict_fill_cache_method(space, pycode, name, nameindex, - w_obj, w_type): + w_obj, w_type, w_method): + if w_method is None or isinstance(w_method, MutableCell): + # don't cache the MutableCell XXX could be fixed + return version_tag = w_type.version_tag() - if version_tag is None: - return + assert version_tag is not None map = w_obj._get_mapdict_map() if map is None or isinstance(map.terminator, DevolvedDictTerminator): return - # We know here that w_obj.getdictvalue(space, name) just returned None, - # so the 'name' is not in the instance. We repeat the lookup to find it - # in the class, this time taking care of the result: it can be either a - # quasi-constant class attribute, or actually a MutableCell --- which we - # must not cache. 
(It should not be None here, but you never know...) - _, w_method = w_type._pure_lookup_where_possibly_with_method_cache( - name, version_tag) - if w_method is None or isinstance(w_method, MutableCell): - return _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) # XXX fix me: if a function contains a loop with both LOAD_ATTR and diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -356,15 +356,8 @@ if cls.typedef.applevel_subclasses_base is not None: cls = cls.typedef.applevel_subclasses_base # - if (self.config.objspace.std.withmapdict and cls is W_ObjectObject - and not w_subtype.needsdel): - from pypy.objspace.std.mapdict import get_subclass_of_correct_size - subcls = get_subclass_of_correct_size(self, cls, w_subtype) - else: - subcls = get_unique_interplevel_subclass( - self.config, cls, w_subtype.hasdict, - w_subtype.layout.nslots != 0, - w_subtype.needsdel, w_subtype.weakrefable) + subcls = get_unique_interplevel_subclass( + self.config, cls, w_subtype.needsdel) instance = instantiate(subcls) assert isinstance(instance, cls) instance.user_setup(self, w_subtype) @@ -517,7 +510,6 @@ return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2]) _DescrOperation_is_true = is_true - _DescrOperation_getattr = getattr def is_true(self, w_obj): # a shortcut for performance @@ -526,8 +518,6 @@ return self._DescrOperation_is_true(w_obj) def getattr(self, w_obj, w_name): - if not self.config.objspace.std.getattributeshortcut: - return self._DescrOperation_getattr(w_obj, w_name) # an optional shortcut for performance w_type = self.type(w_obj) diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -795,13 +795,3 @@ return 42 x = Foo() assert "hello" + x == 42 - -class AppTestPrebuilt(AppTestBytesObject): - spaceconfig 
= {"objspace.std.withprebuiltchar": True} - -class AppTestShare(AppTestBytesObject): - spaceconfig = {"objspace.std.sharesmallstr": True} - -class AppTestPrebuiltShare(AppTestBytesObject): - spaceconfig = {"objspace.std.withprebuiltchar": True, - "objspace.std.sharesmallstr": True} diff --git a/pypy/objspace/std/test/test_callmethod.py b/pypy/objspace/std/test/test_callmethod.py --- a/pypy/objspace/std/test/test_callmethod.py +++ b/pypy/objspace/std/test/test_callmethod.py @@ -97,21 +97,17 @@ else: raise Exception("did not raise?") """ - + def test_kwargs(self): exec """if 1: class C(object): def f(self, a): return a + 2 - + assert C().f(a=3) == 5 """ -class AppTestCallMethodWithGetattributeShortcut(AppTestCallMethod): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - class TestCallMethod: def test_space_call_method(self): space = self.space diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1113,11 +1113,9 @@ class Config: class objspace: class std: - withsmalldicts = False withcelldict = False - withmethodcache = False - withidentitydict = False - withmapdict = False + methodcachesizeexp = 11 + withmethodcachecounter = False FakeSpace.config = Config() diff --git a/pypy/objspace/std/test/test_identitydict.py b/pypy/objspace/std/test/test_identitydict.py --- a/pypy/objspace/std/test/test_identitydict.py +++ b/pypy/objspace/std/test/test_identitydict.py @@ -1,62 +1,7 @@ import py from pypy.interpreter.gateway import interp2app -class AppTestComparesByIdentity: - spaceconfig = {"objspace.std.withidentitydict": True} - - def setup_class(cls): - from pypy.objspace.std import identitydict - if cls.runappdirect: - py.test.skip("interp2app doesn't work on appdirect") - - def compares_by_identity(space, w_cls): - return space.wrap(w_cls.compares_by_identity()) - cls.w_compares_by_identity = 
cls.space.wrap(interp2app(compares_by_identity)) - - def test_compares_by_identity(self): - class Plain(object): - pass - - class CustomEq(object): - def __eq__(self, other): - return True - - class CustomCmp (object): - def __cmp__(self, other): - return 0 - - class CustomHash(object): - def __hash__(self): - return 0 - - class TypeSubclass(type): - pass - - class TypeSubclassCustomCmp(type): - def __cmp__(self, other): - return 0 - - assert self.compares_by_identity(Plain) - assert not self.compares_by_identity(CustomEq) - assert not self.compares_by_identity(CustomCmp) - assert not self.compares_by_identity(CustomHash) - assert self.compares_by_identity(type) - assert self.compares_by_identity(TypeSubclass) - assert not self.compares_by_identity(TypeSubclassCustomCmp) - - def test_modify_class(self): - class X(object): - pass - - assert self.compares_by_identity(X) - X.__eq__ = lambda x: None - assert not self.compares_by_identity(X) - del X.__eq__ - assert self.compares_by_identity(X) - - class AppTestIdentityDict(object): - spaceconfig = {"objspace.std.withidentitydict": True} def setup_class(cls): if cls.runappdirect: diff --git a/pypy/objspace/std/test/test_identityset.py b/pypy/objspace/std/test/test_identityset.py --- a/pypy/objspace/std/test/test_identityset.py +++ b/pypy/objspace/std/test/test_identityset.py @@ -3,9 +3,6 @@ class AppTestIdentitySet(object): - # needed for compares_by_identity - spaceconfig = {"objspace.std.withidentitydict": True} - def setup_class(cls): from pypy.objspace.std import identitydict if cls.runappdirect: diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -432,6 +432,8 @@ class AppTestListObject(object): + spaceconfig = {"objspace.std.withliststrategies": True} # it's the default + def setup_class(cls): import platform import sys @@ -1590,20 +1592,13 @@ assert L3.index(-0.0, i) == 
i -class AppTestListObjectWithRangeList(AppTestListObject): - """Run the list object tests with range lists enabled. Tests should go in - AppTestListObject so they can be run -A against CPython as well. - """ - spaceconfig = {"objspace.std.withrangelist": True} - - class AppTestRangeListForcing: """Tests for range lists that test forcing. Regular tests should go in AppTestListObject so they can be run -A against CPython as well. Separate from AppTestListObjectWithRangeList so we don't silently overwrite tests with the same names. """ - spaceconfig = {"objspace.std.withrangelist": True} + spaceconfig = {"objspace.std.withliststrategies": True} def setup_class(cls): if cls.runappdirect: diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -4,18 +4,16 @@ class Config: class objspace: class std: - withsmalldicts = False withcelldict = False - withmethodcache = False - withidentitydict = False - withmapdict = True + methodcachesizeexp = 11 + withmethodcachecounter = False space = FakeSpace() space.config = Config class Class(object): def __init__(self, hasdict=True): - self.hasdict = True + self.hasdict = hasdict if hasdict: self.terminator = DictTerminator(space, self) else: @@ -24,10 +22,17 @@ def instantiate(self, sp=None): if sp is None: sp = space - result = Object() + if self.hasdict: + result = Object() + else: + result = ObjectWithoutDict() result.user_setup(sp, self) return result +class ObjectWithoutDict(ObjectWithoutDict): + class typedef: + hasdict = False + class Object(Object): class typedef: hasdict = False @@ -431,6 +436,9 @@ assert obj.getslotvalue(b) == 60 assert obj.storage == [50, 60] assert not obj.setdictvalue(space, "a", 70) + assert obj.getdict(space) is None + assert obj.getdictvalue(space, "a") is None + def test_getdict(): cls = Class() @@ -591,15 +599,20 @@ def test_specialized_class(): + from 
pypy.objspace.std.mapdict import _make_storage_mixin_size_n from pypy.objspace.std.objectobject import W_ObjectObject - classes = memo_get_subclass_of_correct_size(space, W_ObjectObject) + classes = [_make_storage_mixin_size_n(i) for i in range(2, 10)] w1 = W_Root() w2 = W_Root() w3 = W_Root() w4 = W_Root() w5 = W_Root() w6 = W_Root() - for objectcls in classes: + for mixin in classes: + class objectcls(W_ObjectObject): + objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictDictSupport) + objectmodel.import_from_mixin(mixin) cls = Class() obj = objectcls() obj.user_setup(space, cls) @@ -646,7 +659,6 @@ # XXX write more class AppTestWithMapDict(object): - spaceconfig = {"objspace.std.withmapdict": True} def test_simple(self): class A(object): @@ -863,8 +875,7 @@ class AppTestWithMapDictAndCounters(object): - spaceconfig = {"objspace.std.withmapdict": True, - "objspace.std.withmethodcachecounter": True} + spaceconfig = {"objspace.std.withmethodcachecounter": True} def setup_class(cls): from pypy.interpreter import gateway @@ -1207,8 +1218,7 @@ assert got == 'd' class AppTestGlobalCaching(AppTestWithMapDict): - spaceconfig = {"objspace.std.withmethodcachecounter": True, - "objspace.std.withmapdict": True} + spaceconfig = {"objspace.std.withmethodcachecounter": True} def test_mix_classes(self): import __pypy__ @@ -1265,8 +1275,7 @@ assert 0, "failed: got %r" % ([got[1] for got in seen],) class TestDictSubclassShortcutBug(object): - spaceconfig = {"objspace.std.withmapdict": True, - "objspace.std.withmethodcachecounter": True} + spaceconfig = {"objspace.std.withmethodcachecounter": True} def test_bug(self): w_dict = self.space.appexec([], """(): diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -202,7 +202,8 @@ l = [type.__getattribute__(A, "__new__")(A)] * 10 
__pypy__.reset_method_cache_counter() for i, a in enumerate(l): - assert a.f() == 42 + # use getattr to circumvent the mapdict cache + assert getattr(a, "f")() == 42 cache_counter = __pypy__.method_cache_counter("f") assert sum(cache_counter) == 10 if cache_counter == (9, 1): @@ -225,9 +226,11 @@ assert a.x == i + 1 A.x += 1 cache_counter = __pypy__.method_cache_counter("x") - assert cache_counter[0] >= 350 + # XXX this is the bad case for the mapdict cache: looking up + # non-method attributes from the class + assert cache_counter[0] >= 450 assert cache_counter[1] >= 1 - assert sum(cache_counter) == 400 + assert sum(cache_counter) == 500 __pypy__.reset_method_cache_counter() a = A() diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -1,3 +1,4 @@ +import py from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef @@ -1105,7 +1106,6 @@ class AppTestGetattributeShortcut: - spaceconfig = {"objspace.std.getattributeshortcut": True} def test_reset_logic(self): class X(object): @@ -1239,3 +1239,57 @@ class Y: __metaclass__ = X assert (Y < Y) is True + + +class AppTestComparesByIdentity: + + def setup_class(cls): + if cls.runappdirect: + py.test.skip("interp2app doesn't work on appdirect") + + def compares_by_identity(space, w_cls): + return space.wrap(w_cls.compares_by_identity()) + cls.w_compares_by_identity = cls.space.wrap(interp2app(compares_by_identity)) + + def test_compares_by_identity(self): + class Plain(object): + pass + + class CustomEq(object): + def __eq__(self, other): + return True + + class CustomCmp (object): + def __cmp__(self, other): + return 0 + + class CustomHash(object): + def __hash__(self): + return 0 + + class TypeSubclass(type): + pass + + class TypeSubclassCustomCmp(type): + def __cmp__(self, other): + return 0 + + 
assert self.compares_by_identity(Plain) + assert not self.compares_by_identity(CustomEq) + assert not self.compares_by_identity(CustomCmp) + assert not self.compares_by_identity(CustomHash) + assert self.compares_by_identity(type) + assert self.compares_by_identity(TypeSubclass) + assert not self.compares_by_identity(TypeSubclassCustomCmp) + + def test_modify_class(self): + class X(object): + pass + + assert self.compares_by_identity(X) + X.__eq__ = lambda x: None + assert not self.compares_by_identity(X) + del X.__eq__ + assert self.compares_by_identity(X) + + diff --git a/pypy/objspace/std/test/test_userobject.py b/pypy/objspace/std/test/test_userobject.py --- a/pypy/objspace/std/test/test_userobject.py +++ b/pypy/objspace/std/test/test_userobject.py @@ -273,13 +273,3 @@ i += 1 -class AppTestWithGetAttributeShortcut(AppTestUserObject): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - -class AppTestDescriptorWithGetAttributeShortcut( - test_descriptor.AppTest_Descriptor): - # for the individual tests see - # ====> ../../test/test_descriptor.py - - spaceconfig = {"objspace.std.getattributeshortcut": True} diff --git a/pypy/objspace/std/test/test_versionedtype.py b/pypy/objspace/std/test/test_versionedtype.py --- a/pypy/objspace/std/test/test_versionedtype.py +++ b/pypy/objspace/std/test/test_versionedtype.py @@ -1,7 +1,6 @@ from pypy.objspace.std.test import test_typeobject class TestVersionedType(test_typeobject.TestTypeObject): - spaceconfig = {"objspace.std.withtypeversion": True} def get_three_classes(self): space = self.space @@ -261,6 +260,3 @@ -class AppTestVersionedType(test_typeobject.AppTestTypeObject): - spaceconfig = {"objspace.std.withtypeversion": True} - diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1,3 +1,4 @@ +import weakref from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root, SpaceCache 
from pypy.interpreter.error import oefmt, OperationError @@ -9,6 +10,7 @@ from rpython.rlib.jit import (promote, elidable_promote, we_are_jitted, elidable, dont_look_inside, unroll_safe) from rpython.rlib.objectmodel import current_object_addr_as_int, compute_hash +from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint class MutableCell(W_Root): @@ -38,9 +40,8 @@ def unwrap_cell(space, w_value): - if space.config.objspace.std.withtypeversion: - if isinstance(w_value, MutableCell): - return w_value.unwrap_cell(space) + if isinstance(w_value, MutableCell): + return w_value.unwrap_cell(space) return w_value def write_cell(space, w_cell, w_value): @@ -69,7 +70,6 @@ class MethodCache(object): def __init__(self, space): - assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.versions = [None] * SIZE self.names = [None] * SIZE @@ -87,6 +87,10 @@ for i in range(len(self.lookup_where)): self.lookup_where[i] = None_None +class _Global(object): + weakref_warning_printed = False +_global = _Global() + class Layout(object): """A Layout is attached to every W_TypeObject to represent the @@ -126,6 +130,7 @@ "flag_cpytype", "flag_abstract?", "flag_sequence_bug_compat", + "compares_by_identity_status?", 'needsdel', 'weakrefable', 'hasdict', @@ -136,11 +141,11 @@ 'mro_w?[*]', ] - # for config.objspace.std.getattributeshortcut + # wether the class has an overridden __getattribute__ # (False is a conservative default, fixed during real usage) uses_object_getattribute = False - # for config.objspace.std.withidentitydict + # for the IdentityDictStrategy compares_by_identity_status = UNKNOWN # used to cache the type's __new__ function @@ -170,20 +175,24 @@ layout = setup_user_defined_type(w_self, force_new_layout) w_self.layout = layout - if space.config.objspace.std.withtypeversion: - if not is_mro_purely_of_types(w_self.mro_w): - pass - else: - # the _version_tag should change, 
whenever the content of - # dict_w of any of the types in the mro changes, or if the mro - # itself changes - w_self._version_tag = VersionTag() - if space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import DictTerminator, NoDictTerminator - if w_self.hasdict: - w_self.terminator = DictTerminator(space, w_self) - else: - w_self.terminator = NoDictTerminator(space, w_self) + if not is_mro_purely_of_types(w_self.mro_w): + pass + else: + # the _version_tag should change, whenever the content of + # dict_w of any of the types in the mro changes, or if the mro + # itself changes + w_self._version_tag = VersionTag() + from pypy.objspace.std.mapdict import DictTerminator, NoDictTerminator + # if the typedef has a dict, then the rpython-class does all the dict + # management, which means from the point of view of mapdict there is no + # dict. However, W_InstanceObjects are an exception to this + from pypy.module.__builtin__.interp_classobj import W_InstanceObject + typedef = w_self.layout.typedef + if (w_self.hasdict and not typedef.hasdict or + typedef is W_InstanceObject.typedef): + w_self.terminator = DictTerminator(space, w_self) + else: + w_self.terminator = NoDictTerminator(space, w_self) def __repr__(self): "NOT_RPYTHON" @@ -197,26 +206,18 @@ """ space = w_self.space assert w_self.is_heaptype() or w_self.is_cpytype() - if (not space.config.objspace.std.withtypeversion and - not space.config.objspace.std.getattributeshortcut and - not space.config.objspace.std.withidentitydict and - not space.config.objspace.std.newshortcut): - return - if space.config.objspace.std.getattributeshortcut: - w_self.uses_object_getattribute = False - # ^^^ conservative default, fixed during real usage + w_self.uses_object_getattribute = False + # ^^^ conservative default, fixed during real usage - if space.config.objspace.std.withidentitydict: - if (key is None or key == '__eq__' or - key == '__cmp__' or key == '__hash__'): - w_self.compares_by_identity_status = 
UNKNOWN + if (key is None or key == '__eq__' or + key == '__cmp__' or key == '__hash__'): + w_self.compares_by_identity_status = UNKNOWN if space.config.objspace.std.newshortcut: w_self.w_new_function = None - if (space.config.objspace.std.withtypeversion - and w_self._version_tag is not None): + if w_self._version_tag is not None: w_self._version_tag = VersionTag() subclasses_w = w_self.get_subclasses() @@ -239,15 +240,13 @@ the one from object, in which case it returns None """ from pypy.objspace.descroperation import object_getattribute if not we_are_jitted(): - shortcut = w_self.space.config.objspace.std.getattributeshortcut - if not shortcut or not w_self.uses_object_getattribute: + if not w_self.uses_object_getattribute: # slow path: look for a custom __getattribute__ on the class w_descr = w_self.lookup('__getattribute__') # if it was not actually overriden in the class, we remember this # fact for the next time. if w_descr is object_getattribute(w_self.space): - if shortcut: - w_self.uses_object_getattribute = True + w_self.uses_object_getattribute = True else: return w_descr return None @@ -262,8 +261,6 @@ def compares_by_identity(w_self): from pypy.objspace.descroperation import object_hash, type_eq - if not w_self.space.config.objspace.std.withidentitydict: - return False # conservative # if w_self.compares_by_identity_status != UNKNOWN: # fast path @@ -296,13 +293,12 @@ return compute_C3_mro(w_self.space, w_self) def getdictvalue(w_self, space, attr): - if space.config.objspace.std.withtypeversion: - version_tag = w_self.version_tag() - if version_tag is not None: - return unwrap_cell( - space, - w_self._pure_getdictvalue_no_unwrapping( - space, version_tag, attr)) + version_tag = w_self.version_tag() + if version_tag is not None: + return unwrap_cell( + space, + w_self._pure_getdictvalue_no_unwrapping( + space, version_tag, attr)) w_value = w_self._getdictvalue_no_unwrapping(space, attr) return unwrap_cell(space, w_value) @@ -333,14 +329,13 @@ msg = 
("a __del__ method added to an existing type will not be " "called") space.warn(space.wrap(msg), space.w_RuntimeWarning) - if space.config.objspace.std.withtypeversion: - version_tag = w_self.version_tag() - if version_tag is not None: - w_curr = w_self._pure_getdictvalue_no_unwrapping( - space, version_tag, name) - w_value = write_cell(space, w_curr, w_value) - if w_value is None: - return True + version_tag = w_self.version_tag() + if version_tag is not None: + w_curr = w_self._pure_getdictvalue_no_unwrapping( + space, version_tag, name) + w_value = write_cell(space, w_curr, w_value) + if w_value is None: + return True w_self.mutated(name) w_self.dict_w[name] = w_value return True @@ -362,17 +357,11 @@ def lookup(w_self, name): # note that this doesn't call __get__ on the result at all space = w_self.space - if space.config.objspace.std.withmethodcache: - return w_self.lookup_where_with_method_cache(name)[1] - - return w_self._lookup(name) + return w_self.lookup_where_with_method_cache(name)[1] def lookup_where(w_self, name): space = w_self.space - if space.config.objspace.std.withmethodcache: - return w_self.lookup_where_with_method_cache(name) - - return w_self._lookup_where(name) + return w_self.lookup_where_with_method_cache(name) From pypy.commits at gmail.com Tue Apr 26 04:52:04 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 26 Apr 2016 01:52:04 -0700 (PDT) Subject: [pypy-commit] pypy default: merge heads Message-ID: <571f2c34.50301c0a.b22c8.7420@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r83899:4f5be790880a Date: 2016-04-26 11:51 +0300 http://bitbucket.org/pypy/pypy/changeset/4f5be790880a/ Log: merge heads diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -5,15 +5,23 @@ overly detailed -1. check out the branch vendor/stdlib +0. make sure your working dir is clean +1. 
check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k) + or create branch vendor/stdlib-3-* 2. upgrade the files there + 2a. remove lib-python/2.7/ or lib-python/3/ + 2b. copy the files from the cpython repo + 2c. hg add lib-python/2.7/ or lib-python/3/ + 2d. hg remove --after + 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'` + 2f. fix copies / renames manually by running `hg copy --after ` for each copied file 3. update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit -5. update to default/py3k +5. update to default / py3k 6. create a integration branch for the new stdlib (just hg branch stdlib-$version) -7. merge vendor/stdlib +7. merge vendor/stdlib or vendor/stdlib-3-* 8. commit 10. fix issues 11. commit --close-branch -12. merge to default +12. merge to default / py3k diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -214,6 +214,7 @@ self._trace(frame, 'exception', None, operationerr) #operationerr.print_detailed_traceback(self.space) + @jit.dont_look_inside @specialize.arg(1) def sys_exc_info(self, for_hidden=False): """Implements sys.exc_info(). @@ -225,15 +226,7 @@ # NOTE: the result is not the wrapped sys.exc_info() !!! 
""" - frame = self.gettopframe() - while frame: - if frame.last_exception is not None: - if ((for_hidden or not frame.hide()) or - frame.last_exception is - get_cleared_operation_error(self.space)): - return frame.last_exception - frame = frame.f_backref() - return None + return self.gettopframe()._exc_info_unroll(self.space, for_hidden) def set_sys_exc_info(self, operror): frame = self.gettopframe_nohidden() diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized, check_nonneg from rpython.rlib.jit import hint -from rpython.rlib.objectmodel import we_are_translated, instantiate +from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint from rpython.tool.pairtype import extendabletype @@ -12,7 +12,8 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.astcompiler import consts from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import ( + OperationError, get_cleared_operation_error, oefmt) from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.nestedscope import Cell from pypy.tool import stdlib_opcode @@ -870,6 +871,22 @@ return space.wrap(self.builtin is not space.builtin) return space.w_False + @jit.unroll_safe + @specialize.arg(2) + def _exc_info_unroll(self, space, for_hidden=False): + """Return the most recent OperationError being handled in the + call stack + """ + frame = self + while frame: + last = frame.last_exception + if last is not None: + if last is get_cleared_operation_error(self.space): + break + if for_hidden or not frame.hide(): + return last + frame = frame.f_backref() + return None # ____________________________________________________________ diff --git 
a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -739,25 +739,16 @@ unroller = SContinueLoop(startofloop) return self.unrollstack_and_jump(unroller) - @jit.unroll_safe def RAISE_VARARGS(self, nbargs, next_instr): space = self.space if nbargs == 0: - frame = self - while frame: - if frame.last_exception is not None: - operror = frame.last_exception - break - frame = frame.f_backref() - else: - raise OperationError(space.w_TypeError, - space.wrap("raise: no active exception to re-raise")) - if operror.w_type is space.w_None: - raise OperationError(space.w_TypeError, - space.wrap("raise: the exception to re-raise was cleared")) + last_operr = self._exc_info_unroll(space) + if last_operr is None: + raise oefmt(space.w_TypeError, + "No active exception to reraise") # re-raise, no new traceback obj will be attached - self.last_exception = operror - raise RaiseWithExplicitTraceback(operror) + self.last_exception = last_operr + raise RaiseWithExplicitTraceback(last_operr) w_value = w_traceback = space.w_None if nbargs >= 3: From pypy.commits at gmail.com Tue Apr 26 04:55:15 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 26 Apr 2016 01:55:15 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: start_new_trace was called too late, it must be called before the optimizer is invoked. added an ABORT_TRACE marker and encoding it into the jitlog Message-ID: <571f2cf3.c50a1c0a.6ab6c.ffffa5db@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83900:1ba36757f66e Date: 2016-04-26 09:17 +0200 http://bitbucket.org/pypy/pypy/changeset/1ba36757f66e/ Log: start_new_trace was called too late, it must be called before the optimizer is invoked. 
added an ABORT_TRACE marker and encoding it into the jitlog diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -218,6 +218,7 @@ loop_info, ops = optimize_trace(metainterp_sd, jitdriver_sd, data, metainterp.box_names_memo) except InvalidLoop: + metainterp_sd.jitlog.trace_aborted() trace.cut_at(cut_at) return None loop = create_empty_loop(metainterp) @@ -251,7 +252,9 @@ history = metainterp.history trace = history.trace warmstate = jitdriver_sd.warmstate - + # + metainterp_sd.jitlog.start_new_trace(None, False) + # enable_opts = jitdriver_sd.warmstate.enable_opts if try_disabling_unroll: if 'unroll' not in enable_opts: @@ -276,6 +279,7 @@ preamble_data, metainterp.box_names_memo) except InvalidLoop: + metainterp_sd.jitlog.trace_aborted() history.cut(cut_at) return None @@ -292,6 +296,7 @@ loop_data, metainterp.box_names_memo) except InvalidLoop: + metainterp_sd.jitlog.trace_aborted() history.cut(cut_at) return None @@ -341,7 +346,9 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd history = metainterp.history - + # + metainterp_sd.jitlog.start_new_trace(resumekey, False) + # loop_jitcell_token = metainterp.get_procedure_token(greenkey) assert loop_jitcell_token @@ -369,6 +376,7 @@ loop_data, metainterp.box_names_memo) except InvalidLoop: + metainterp_sd.jitlog.trace_aborted() history.cut(cut) return None @@ -566,7 +574,6 @@ def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs, operations, original_loop_token, memo): - metainterp_sd.jitlog.start_new_trace(faildescr) forget_optimization_info(operations) forget_optimization_info(inputargs) if not we_are_translated(): @@ -1018,7 +1025,7 @@ def compile_trace(metainterp, resumekey, runtime_boxes): """Try to compile a new bridge leading from the beginning of the history - to some existging place. + to some existing place. 
""" from rpython.jit.metainterp.optimizeopt import optimize_trace @@ -1031,6 +1038,9 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd + # + metainterp_sd.jitlog.start_new_trace(resumekey, False) + # if isinstance(resumekey, ResumeAtPositionDescr): inline_short_preamble = False else: @@ -1055,6 +1065,7 @@ info, newops = optimize_trace(metainterp_sd, jitdriver_sd, data, metainterp.box_names_memo) except InvalidLoop: + metainterp_sd.jitlog.trace_aborted() #pdb.post_mortem(sys.exc_info()[2]) debug_print("compile_new_bridge: got an InvalidLoop") # XXX I am fairly convinced that optimize_bridge cannot actually raise diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py --- a/rpython/rlib/jitlog.py +++ b/rpython/rlib/jitlog.py @@ -174,6 +174,7 @@ ('JITLOG_HEADER',), ('MERGE_POINT',), ('COMMON_PREFIX',), + ('ABORT_TRACE',), ] start = 0x11 @@ -235,6 +236,9 @@ content.append(encode_le_addr(int(entry_bridge))) self._write_marked(MARK_START_TRACE, ''.join(content)) + def trace_aborted(self): + self._write_marked(MARK_ABORT_TRACE, encode_le_64bit(self.trace_id)) + def _write_marked(self, mark, line): if not we_are_translated(): assert self.cintf.jitlog_enabled() From pypy.commits at gmail.com Tue Apr 26 04:55:17 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 26 Apr 2016 01:55:17 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: setting the metainterp_sd earlier to be able to resolve constptr even for the trace that has not been optimized Message-ID: <571f2cf5.26b0c20a.18b87.249d@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83901:ebe7bd49cf45 Date: 2016-04-26 10:54 +0200 http://bitbucket.org/pypy/pypy/changeset/ebe7bd49cf45/ Log: setting the metainterp_sd earlier to be able to resolve constptr even for the trace that has not been optimized diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py 
@@ -253,7 +253,7 @@ trace = history.trace warmstate = jitdriver_sd.warmstate # - metainterp_sd.jitlog.start_new_trace(None, False) + metainterp_sd.jitlog.start_new_trace(metainterp_sd, None, False) # enable_opts = jitdriver_sd.warmstate.enable_opts if try_disabling_unroll: @@ -347,7 +347,7 @@ jitdriver_sd = metainterp.jitdriver_sd history = metainterp.history # - metainterp_sd.jitlog.start_new_trace(resumekey, False) + metainterp_sd.jitlog.start_new_trace(metainterp_sd, resumekey, False) # loop_jitcell_token = metainterp.get_procedure_token(greenkey) assert loop_jitcell_token @@ -512,7 +512,7 @@ def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type, orig_inpargs, memo): - metainterp_sd.jitlog.start_new_trace(None, type == "entry bridge") + metainterp_sd.jitlog.start_new_trace(metainterp_sd, None, type == "entry bridge") forget_optimization_info(loop.operations) forget_optimization_info(loop.inputargs) vinfo = jitdriver_sd.virtualizable_info @@ -1039,7 +1039,7 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd # - metainterp_sd.jitlog.start_new_trace(resumekey, False) + metainterp_sd.jitlog.start_new_trace(metainterp_sd, resumekey, False) # if isinstance(resumekey, ResumeAtPositionDescr): inline_short_preamble = False diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py --- a/rpython/rlib/jitlog.py +++ b/rpython/rlib/jitlog.py @@ -209,6 +209,7 @@ self.cintf = cintf.setup() self.memo = {} self.trace_id = -1 + self.metainterp_sd = None def setup_once(self): if self.cintf.jitlog_enabled(): @@ -222,9 +223,10 @@ def finish(self): self.cintf.jitlog_teardown() - def start_new_trace(self, faildescr=None, entry_bridge=False): + def start_new_trace(self, metainterp_sd, faildescr=None, entry_bridge=False): if not self.cintf.jitlog_enabled(): return + self.metainterp_sd = metainterp_sd self.trace_id += 1 content = [encode_le_addr(self.trace_id)] if faildescr: @@ -255,10 +257,11 @@ def log_trace(self, tag, metainterp_sd, 
mc, memo=None): if not self.cintf.jitlog_enabled(): return EMPTY_TRACE_LOG + assert self.metainterp_sd is not None assert isinstance(tag, int) if memo is None: memo = {} - return LogTrace(tag, memo, metainterp_sd, mc, self) + return LogTrace(tag, memo, self.metainterp_sd, mc, self) def log_patch_guard(self, descr_number, addr): if not self.cintf.jitlog_enabled(): From pypy.commits at gmail.com Tue Apr 26 07:35:40 2016 From: pypy.commits at gmail.com (william_ml_leslie) Date: Tue, 26 Apr 2016 04:35:40 -0700 (PDT) Subject: [pypy-commit] pypy verbose-imports: Log things if verbose flags are supplied Message-ID: <571f528c.0f801c0a.51e59.ffffc492@mx.google.com> Author: William ML Leslie Branch: verbose-imports Changeset: r83902:7a2a835311f1 Date: 2016-04-26 21:28 +1000 http://bitbucket.org/pypy/pypy/changeset/7a2a835311f1/ Log: Log things if verbose flags are supplied diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -55,6 +55,13 @@ return '.' 
+ soabi + SO +def log_pyverbose(space, level, message): + w_flags = space.sys.get('flags') + w_verbose = space.getattr(w_flags, 'verbose') + if space.int_w(w_verbose) >= level: + w_stderr = space.sys.get('stderr') + space.call_method(w_stderr, "write", space.wrap(message)) + def file_exists(path): """Tests whether the given path is an existing regular file.""" return os.path.isfile(path) and case_ok(path) @@ -541,6 +548,7 @@ path = space.str0_w(w_pathitem) filepart = os.path.join(path, partname) + log_pyverbose(space, 2, "# trying %s" % (filepart,)) if os.path.isdir(filepart) and case_ok(filepart): initfile = os.path.join(filepart, '__init__') modtype, _, _ = find_modtype(space, initfile) @@ -585,6 +593,8 @@ def load_c_extension(space, filename, modulename): from pypy.module.cpyext.api import load_extension_module + log_pyverbose(space, 1, "import %s # from %s\n" % + (modulename, pathname)) load_extension_module(space, filename, modulename) # NB. cpyext.api.load_extension_module() can also delegate to _cffi_backend @@ -888,6 +898,11 @@ """ w = space.wrap + space.sys + + log_pyverbose(space, 1, "import %s # from %s\n" % + (space.str_w(w_modulename), pathname)) + if space.config.objspace.usepycfiles: src_stat = os.fstat(fd) cpathname = pathname + 'c' @@ -1016,6 +1031,9 @@ Load a module from a compiled file, execute it, and return its module object. 
""" + log_pyverbose(space, 1, "import %s # compiled from %s\n" % + (space.str_w(w_modulename), cpathname)) + if magic != get_pyc_magic(space): raise oefmt(space.w_ImportError, "Bad magic number in %s", cpathname) #print "loading pyc file:", cpathname From pypy.commits at gmail.com Tue Apr 26 08:05:45 2016 From: pypy.commits at gmail.com (william_ml_leslie) Date: Tue, 26 Apr 2016 05:05:45 -0700 (PDT) Subject: [pypy-commit] pypy verbose-imports: Log nothing if sys hasn't been imported yet Message-ID: <571f5999.82b71c0a.b4f8c.0153@mx.google.com> Author: William ML Leslie Branch: verbose-imports Changeset: r83903:408072cca2fc Date: 2016-04-26 22:03 +1000 http://bitbucket.org/pypy/pypy/changeset/408072cca2fc/ Log: Log nothing if sys hasn't been imported yet diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -56,9 +56,10 @@ return '.' + soabi + SO def log_pyverbose(space, level, message): - w_flags = space.sys.get('flags') - w_verbose = space.getattr(w_flags, 'verbose') - if space.int_w(w_verbose) >= level: + if space.sys.w_initialdict is None: + return # sys module not initialised, avoid recursion + w_verbose = space.sys.get_flag('verbose') + if w_verbose >= level: w_stderr = space.sys.get('stderr') space.call_method(w_stderr, "write", space.wrap(message)) From pypy.commits at gmail.com Tue Apr 26 08:40:46 2016 From: pypy.commits at gmail.com (william_ml_leslie) Date: Tue, 26 Apr 2016 05:40:46 -0700 (PDT) Subject: [pypy-commit] pypy verbose-imports: Tests for verbose flag functionality Message-ID: <571f61ce.8673c20a.f7427.ffff8c77@mx.google.com> Author: William ML Leslie Branch: verbose-imports Changeset: r83904:f4c6338b3293 Date: 2016-04-26 22:36 +1000 http://bitbucket.org/pypy/pypy/changeset/f4c6338b3293/ Log: Tests for verbose flag functionality diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py 
+++ b/pypy/module/imp/test/test_import.py @@ -98,6 +98,8 @@ 'a=5\nb=6\rc="""hello\r\nworld"""\r', mode='wb') p.join('mod.py').write( 'a=15\nb=16\rc="""foo\r\nbar"""\r', mode='wb') + setuppkg("verbose1pkg", verbosemod='a = 1729') + setuppkg("verbose2pkg", verbosemod='a = 1729') # create compiled/x.py and a corresponding pyc file p = setuppkg("compiled", x = "x = 84") @@ -711,6 +713,54 @@ else: raise AssertionError("should have failed") + def test_verbose_flag_1(self): + output = [] + class StdErr(object): + def write(self, line): + output.append(line) + + import sys + old_flags = sys.flags + + class Flags(object): + verbose = 1 + def __getattr__(self, name): + return getattr(old_flags, name) + + sys.flags = Flags() + sys.stderr = StdErr() + try: + import verbose1pkg.verbosemod + finally: + reload(sys) + assert 'import verbose1pkg # from ' in output[-2] + assert 'import verbose1pkg.verbosemod # from ' in output[-1] + + def test_verbose_flag_2(self): + output = [] + class StdErr(object): + def write(self, line): + output.append(line) + + import sys + old_flags = sys.flags + + class Flags(object): + verbose = 2 + def __getattr__(self, name): + return getattr(old_flags, name) + + sys.flags = Flags() + sys.stderr = StdErr() + try: + import verbose2pkg.verbosemod + finally: + reload(sys) + assert any('import verbose2pkg # from ' in line + for line in output[:-2]) + assert output[-2].startswith('# trying') + assert 'import verbose2pkg.verbosemod # from ' in output[-1] + class TestAbi: def test_abi_tag(self): From pypy.commits at gmail.com Tue Apr 26 08:40:48 2016 From: pypy.commits at gmail.com (william_ml_leslie) Date: Tue, 26 Apr 2016 05:40:48 -0700 (PDT) Subject: [pypy-commit] pypy verbose-imports: Test verbose = 0, too Message-ID: <571f61d0.2a18c20a.6668b.fffff442@mx.google.com> Author: William ML Leslie Branch: verbose-imports Changeset: r83905:64f8ef4f4941 Date: 2016-04-26 22:39 +1000 http://bitbucket.org/pypy/pypy/changeset/64f8ef4f4941/ Log: Test verbose = 0, too 
diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -100,6 +100,7 @@ 'a=15\nb=16\rc="""foo\r\nbar"""\r', mode='wb') setuppkg("verbose1pkg", verbosemod='a = 1729') setuppkg("verbose2pkg", verbosemod='a = 1729') + setuppkg("verbose0pkg", verbosemod='a = 1729') # create compiled/x.py and a corresponding pyc file p = setuppkg("compiled", x = "x = 84") @@ -761,6 +762,20 @@ assert output[-2].startswith('# trying') assert 'import verbose2pkg.verbosemod # from ' in output[-1] + def test_verbose_flag_0(self): + output = [] + class StdErr(object): + def write(self, line): + output.append(line) + + import sys + sys.stderr = StdErr() + try: + import verbose0pkg.verbosemod + finally: + reload(sys) + assert not output + class TestAbi: def test_abi_tag(self): From pypy.commits at gmail.com Tue Apr 26 08:45:52 2016 From: pypy.commits at gmail.com (william_ml_leslie) Date: Tue, 26 Apr 2016 05:45:52 -0700 (PDT) Subject: [pypy-commit] pypy verbose-imports: Update pypy's usage string now that we have -v support Message-ID: <571f6300.43ecc20a.5713.ffff8d08@mx.google.com> Author: William ML Leslie Branch: verbose-imports Changeset: r83906:f56292db286f Date: 2016-04-26 22:44 +1000 http://bitbucket.org/pypy/pypy/changeset/f56292db286f/ Log: Update pypy's usage string now that we have -v support diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -2,7 +2,7 @@ # This is pure Python code that handles the main entry point into "pypy". # See test/test_app_main. 
-# Missing vs CPython: -d, -t, -v, -x, -3 +# Missing vs CPython: -d, -t, -x, -3 USAGE1 = __doc__ = """\ Options and arguments (and corresponding environment variables): -B : don't write .py[co] files on import; also PYTHONDONTWRITEBYTECODE=x @@ -19,6 +19,8 @@ -s : don't add user site directory to sys.path; also PYTHONNOUSERSITE -S : don't imply 'import site' on initialization -u : unbuffered binary stdout and stderr; also PYTHONUNBUFFERED=x +-v : verbose (trace import statements); also PYTHONVERBOSE=x + can be supplied multiple times to increase verbosity -V : print the Python version number and exit (also --version) -W arg : warning control; arg is action:message:category:module:lineno also PYTHONWARNINGS=arg From pypy.commits at gmail.com Tue Apr 26 09:51:50 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 06:51:50 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: Port force_from_effectinfo() Message-ID: <571f7276.2179c20a.60216.ffffa86d@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83908:218c5643fb3d Date: 2016-04-26 15:21 +0200 http://bitbucket.org/pypy/pypy/changeset/218c5643fb3d/ Log: Port force_from_effectinfo() diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -432,28 +432,30 @@ optimize_GUARD_EXCEPTION = optimize_GUARD_NO_EXCEPTION def force_from_effectinfo(self, effectinfo): - # XXX we can get the wrong complexity here, if the lists - # XXX stored on effectinfo are large - for fielddescr in effectinfo.readonly_descrs_fields: - self.force_lazy_set(fielddescr) - for arraydescr in effectinfo.readonly_descrs_arrays: - self.force_lazy_setarrayitem(arraydescr) - for fielddescr in effectinfo.write_descrs_fields: - if fielddescr.is_always_pure(): - continue - try: - del self.cached_dict_reads[fielddescr] - except KeyError: - pass - self.force_lazy_set(fielddescr, 
can_cache=False) - for arraydescr in effectinfo.write_descrs_arrays: - self.force_lazy_setarrayitem(arraydescr, can_cache=False) - if arraydescr in self.corresponding_array_descrs: - dictdescr = self.corresponding_array_descrs.pop(arraydescr) + for fielddescr, cf in self.cached_fields.items(): + if effectinfo.check_readonly_descr_field(fielddescr): + cf.force_lazy_set(self, fielddescr) + elif effectinfo.check_write_descr_field(fielddescr): + if fielddescr.is_always_pure(): + continue try: - del self.cached_dict_reads[dictdescr] + del self.cached_dict_reads[fielddescr] except KeyError: - pass # someone did it already + pass + cf.force_lazy_set(self, fielddescr, can_cache=False) + # + for arraydescr, submap in self.cached_arrayitems.items(): + if effectinfo.check_readonly_descr_array(arraydescr): + self.force_lazy_setarrayitem_submap(submap) + elif effectinfo.check_write_descr_array(arraydescr): + self.force_lazy_setarrayitem_submap(submap, can_cache=False) + if arraydescr in self.corresponding_array_descrs: + dictdescr = self.corresponding_array_descrs.pop(arraydescr) + try: + del self.cached_dict_reads[dictdescr] + except KeyError: + pass # someone did it already + # if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info self.force_lazy_set(vrefinfo.descr_forced) @@ -476,6 +478,10 @@ if indexb is None or indexb.contains(idx): cf.force_lazy_set(self, None, can_cache) + def force_lazy_setarrayitem_submap(self, submap, can_cache=True): + for idx, cf in submap.iteritems(): + cf.force_lazy_set(self, None, can_cache) + def force_all_lazy_sets(self): items = self.cached_fields.items() if not we_are_translated(): From pypy.commits at gmail.com Tue Apr 26 09:51:52 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 06:51:52 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: More fixes. Remove the CUTOFF in backendopt/writeanalyze. 
Message-ID: <571f7278.41c8c20a.2d6b8.ffffab98@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83909:8d1271f7508d Date: 2016-04-26 15:51 +0200 http://bitbucket.org/pypy/pypy/changeset/8d1271f7508d/ Log: More fixes. Remove the CUTOFF in backendopt/writeanalyze. diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -479,6 +479,9 @@ all_descrs.append(v) return all_descrs + def fetch_all_descrs(self): + return self.descrs.values() + def calldescrof(self, FUNC, ARGS, RESULT, effect_info): key = ('call', getkind(RESULT), tuple([getkind(A) for A in ARGS]), diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -174,6 +174,14 @@ result.call_release_gil_target = call_release_gil_target if result.check_can_raise(ignore_memoryerror=True): assert oopspecindex in cls._OS_CANRAISE + + if (result._write_descrs_arrays is not None and + len(result._write_descrs_arrays) == 1): + # this is used only for ARRAYCOPY operations + [result.single_write_descr_array] = result._write_descrs_arrays + else: + result.single_write_descr_array = None + cls._cache[key] = result return result @@ -190,9 +198,11 @@ return bitstring.bitcheck(self.bitstring_write_descrs_arrays, arraydescr.ei_index) def check_readonly_descr_interiorfield(self, interiorfielddescr): + # NOTE: this is not used so far return bitstring.bitcheck(self.bitstring_readonly_descrs_interiorfields, interiorfielddescr.ei_index) def check_write_descr_interiorfield(self, interiorfielddescr): + # NOTE: this is not used so far return bitstring.bitcheck(self.bitstring_write_descrs_interiorfields, interiorfielddescr.ei_index) diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- 
a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -33,6 +33,7 @@ assert list(effectinfo._readonly_descrs_fields) == [('fielddescr', S, "a")] assert not effectinfo._write_descrs_fields assert not effectinfo._write_descrs_arrays + assert effectinfo.single_write_descr_array is None def test_include_write_field(): @@ -61,6 +62,7 @@ assert not effectinfo._readonly_descrs_fields assert not effectinfo._write_descrs_fields assert list(effectinfo._write_descrs_arrays) == [('arraydescr', A)] + assert effectinfo.single_write_descr_array == ('arraydescr', A) def test_dont_include_read_and_write_field(): diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -209,7 +209,7 @@ isinstance(argboxes[3], ConstInt) and isinstance(argboxes[4], ConstInt) and isinstance(argboxes[5], ConstInt) and - len(descr.get_extra_info().write_descrs_arrays) == 1): + descr.get_extra_info().single_write_descr_array is not None): # ARRAYCOPY with constant starts and constant length doesn't escape # its argument # XXX really? 
@@ -299,9 +299,9 @@ isinstance(argboxes[3], ConstInt) and isinstance(argboxes[4], ConstInt) and isinstance(argboxes[5], ConstInt) and - len(effectinfo.write_descrs_arrays) == 1 + effectinfo.single_write_descr_array is not None ): - descr = effectinfo.write_descrs_arrays[0] + descr = effectinfo.single_write_descr_array cache = self.heap_array_cache.get(descr, None) srcstart = argboxes[3].getint() dststart = argboxes[4].getint() @@ -328,10 +328,10 @@ idx_cache._clear_cache_on_write(seen_allocation_of_target) return elif ( - len(effectinfo.write_descrs_arrays) == 1 + effectinfo.single_write_descr_array is not None ): # Fish the descr out of the effectinfo - cache = self.heap_array_cache.get(effectinfo.write_descrs_arrays[0], None) + cache = self.heap_array_cache.get(effectinfo.single_write_descr_array, None) if cache is not None: for idx, cache in cache.iteritems(): cache._clear_cache_on_write(seen_allocation_of_target) diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -432,10 +432,14 @@ optimize_GUARD_EXCEPTION = optimize_GUARD_NO_EXCEPTION def force_from_effectinfo(self, effectinfo): + # Note: this version of the code handles effectively + # effectinfos that store arbitrarily many descrs, by looping + # on self.cached_{fields, arrayitems} and looking them up in + # the bitstrings stored in the effectinfo. 
for fielddescr, cf in self.cached_fields.items(): if effectinfo.check_readonly_descr_field(fielddescr): cf.force_lazy_set(self, fielddescr) - elif effectinfo.check_write_descr_field(fielddescr): + if effectinfo.check_write_descr_field(fielddescr): if fielddescr.is_always_pure(): continue try: @@ -447,7 +451,7 @@ for arraydescr, submap in self.cached_arrayitems.items(): if effectinfo.check_readonly_descr_array(arraydescr): self.force_lazy_setarrayitem_submap(submap) - elif effectinfo.check_write_descr_array(arraydescr): + if effectinfo.check_write_descr_array(arraydescr): self.force_lazy_setarrayitem_submap(submap, can_cache=False) if arraydescr in self.corresponding_array_descrs: dictdescr = self.corresponding_array_descrs.pop(arraydescr) diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -620,10 +620,10 @@ and length and ((dest_info and dest_info.is_virtual()) or length.getint() <= 8) and ((source_info and source_info.is_virtual()) or length.getint() <= 8) - and len(extrainfo.write_descrs_arrays) == 1): # <-sanity check + and extrainfo.single_write_descr_array is not None): #<-sanity check source_start = source_start_box.getint() dest_start = dest_start_box.getint() - arraydescr = extrainfo.write_descrs_arrays[0] + arraydescr = extrainfo.single_write_descr_array if arraydescr.is_array_of_structs(): return False # not supported right now diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -10,7 +10,7 @@ from rpython.jit.metainterp.history import (TreeLoop, AbstractDescr, JitCellToken, TargetToken) from rpython.jit.metainterp.optimizeopt.util import sort_descrs, equaloplists -from rpython.jit.codewriter.effectinfo 
import EffectInfo +from rpython.jit.codewriter.effectinfo import EffectInfo, compute_bitstrings from rpython.jit.metainterp.logger import LogOperations from rpython.jit.tool.oparser import OpParser, pure_parse, convert_loop_to_trace from rpython.jit.metainterp.quasiimmut import QuasiImmutDescr @@ -530,6 +530,7 @@ metainterp_sd.virtualref_info = self.vrefinfo if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection + compute_bitstrings(self.cpu.fetch_all_descrs()) # compile_data.enable_opts = self.enable_opts state = optimize_trace(metainterp_sd, None, compile_data) diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -27,8 +27,12 @@ def __init__(self, extraeffect, oopspecindex, write_descrs_fields, write_descrs_arrays): self.extraeffect = extraeffect self.oopspecindex = oopspecindex - self.write_descrs_fields = write_descrs_fields - self.write_descrs_arrays = write_descrs_arrays + self._write_descrs_fields = write_descrs_fields + self._write_descrs_arrays = write_descrs_arrays + if len(write_descrs_arrays) == 1: + [self.single_write_descr_array] = write_descrs_arrays + else: + self.single_write_descr_array = None def has_random_effects(self): return self.extraeffect == self.EF_RANDOM_EFFECTS @@ -37,14 +41,14 @@ def __init__(self, extraeffect, oopspecindex=None, write_descrs_fields=[], write_descrs_arrays=[]): self.extraeffect = extraeffect self.oopspecindex = oopspecindex - self.write_descrs_fields = write_descrs_fields - self.write_descrs_arrays = write_descrs_arrays + self.__write_descrs_fields = write_descrs_fields + self.__write_descrs_arrays = write_descrs_arrays def get_extra_info(self): return FakeEffectinfo( self.extraeffect, self.oopspecindex, - write_descrs_fields=self.write_descrs_fields, - write_descrs_arrays=self.write_descrs_arrays, + 
write_descrs_fields=self.__write_descrs_fields, + write_descrs_arrays=self.__write_descrs_arrays, ) arraycopydescr1 = FakeCallDescr(FakeEffectinfo.EF_CANNOT_RAISE, FakeEffectinfo.OS_ARRAYCOPY, write_descrs_arrays=[descr1]) diff --git a/rpython/translator/backendopt/writeanalyze.py b/rpython/translator/backendopt/writeanalyze.py --- a/rpython/translator/backendopt/writeanalyze.py +++ b/rpython/translator/backendopt/writeanalyze.py @@ -4,7 +4,11 @@ top_set = object() empty_set = frozenset() -CUTOFF = 1000 +# CUTOFF is disabled, as it gave a strangely not-working-any-more effect +# if the size of the result grows past that bound. The main user was +# optimizeopt/heap.py (force_from_effectinfo), which has been rewritten +# to be happy with any size now. +#CUTOFF = 1000 class WriteAnalyzer(graphanalyze.GraphAnalyzer): def bottom_result(self): @@ -22,8 +26,8 @@ def add_to_result(self, result, other): if other is top_set: return top_set - if len(other) + len(result) > CUTOFF: - return top_set + #if len(other) + len(result) > CUTOFF: + # return top_set result.update(other) return result From pypy.commits at gmail.com Tue Apr 26 10:19:58 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 07:19:58 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: Fix: this goes outside the other loop Message-ID: <571f790e.08121c0a.87a39.208f@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83910:176848faf166 Date: 2016-04-26 16:19 +0200 http://bitbucket.org/pypy/pypy/changeset/176848faf166/ Log: Fix: this goes outside the other loop diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -453,12 +453,13 @@ self.force_lazy_setarrayitem_submap(submap) if effectinfo.check_write_descr_array(arraydescr): self.force_lazy_setarrayitem_submap(submap, can_cache=False) - if arraydescr in self.corresponding_array_descrs: - 
dictdescr = self.corresponding_array_descrs.pop(arraydescr) - try: - del self.cached_dict_reads[dictdescr] - except KeyError: - pass # someone did it already + # + for arraydescr, dictdescr in self.corresponding_array_descrs.items(): + if effectinfo.check_write_descr_array(arraydescr): + try: + del self.cached_dict_reads[dictdescr] + except KeyError: + pass # someone did it already # if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info @@ -483,7 +484,7 @@ cf.force_lazy_set(self, None, can_cache) def force_lazy_setarrayitem_submap(self, submap, can_cache=True): - for idx, cf in submap.iteritems(): + for cf in submap.itervalues(): cf.force_lazy_set(self, None, can_cache) def force_all_lazy_sets(self): From pypy.commits at gmail.com Tue Apr 26 10:29:48 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 07:29:48 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: test fix Message-ID: <571f7b5c.a82cc20a.44b20.ffffbe53@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83911:d1dd7b165726 Date: 2016-04-26 16:29 +0200 http://bitbucket.org/pypy/pypy/changeset/d1dd7b165726/ Log: test fix diff --git a/rpython/jit/metainterp/test/test_warmspot.py b/rpython/jit/metainterp/test/test_warmspot.py --- a/rpython/jit/metainterp/test/test_warmspot.py +++ b/rpython/jit/metainterp/test/test_warmspot.py @@ -624,7 +624,7 @@ pass def setup_descrs(self): - pass + return [] def get_latest_descr(self, deadframe): assert isinstance(deadframe, FakeDeadFrame) From pypy.commits at gmail.com Tue Apr 26 10:52:59 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 07:52:59 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: Comments and tweaks Message-ID: <571f80cb.d81a1c0a.16653.4f0a@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83912:5f39c39b300f Date: 2016-04-26 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/5f39c39b300f/ Log: Comments and tweaks diff --git 
a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -440,6 +440,8 @@ for descr in all_descrs: if hasattr(descr, 'get_extra_info'): ei = descr.get_extra_info() + if ei is None: + continue if ei._readonly_descrs_fields is None: for key in descrs: assert getattr(ei, '_readonly_descrs_' + key) is None @@ -453,17 +455,33 @@ descrs[key].update(getattr(ei, '_write_descrs_' + key)) else: descr.ei_index = sys.maxint + for key in descrs: - mapping = {} + all_sets = [] for descr in descrs[key]: - assert descr.ei_index == sys.maxint # not modified yet eisetr = [ei for ei in effectinfos if descr in getattr(ei, '_readonly_descrs_' + key)] eisetw = [ei for ei in effectinfos if descr in getattr(ei, '_write_descrs_' + key)] + # these are the set of all ei such that this descr is in + # ei._readonly_descrs or ei._write_descrs eisetr = frozenset(eisetr) eisetw = frozenset(eisetw) + all_sets.append((descr, eisetr, eisetw)) + + # heuristic to reduce the total size of the bitstrings: start with + # numbering the descrs that are seen in many EffectInfos. If instead, + # by lack of chance, such a descr had a high number, then all these + # EffectInfos' bitstrings would need to store the same high number. 
+ def size_of_both_sets((d, r, w)): + return len(r) + len(w) + all_sets.sort(key=size_of_both_sets, reverse=True) + + mapping = {} + for (descr, eisetr, eisetw) in all_sets: + assert descr.ei_index == sys.maxint # not modified yet descr.ei_index = mapping.setdefault((eisetr, eisetw), len(mapping)) + for ei in effectinfos: bitstrr = [descr.ei_index for descr in getattr(ei, '_readonly_descrs_' + key)] diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -1,3 +1,4 @@ +import sys from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.objectmodel import we_are_translated, Symbolic @@ -87,9 +88,10 @@ class AbstractDescr(AbstractValue): - __slots__ = ('descr_index',) + __slots__ = ('descr_index', 'ei_index') llopaque = True descr_index = -1 + ei_index = sys.maxint def repr_of_descr(self): return '%r' % (self,) From pypy.commits at gmail.com Tue Apr 26 11:20:54 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 26 Apr 2016 08:20:54 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: init merge point needs a counter before encoding the types, Message-ID: <571f8756.cbb81c0a.5133.3e23@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83913:ef963d113f77 Date: 2016-04-26 17:20 +0200 http://bitbucket.org/pypy/pypy/changeset/ef963d113f77/ Log: init merge point needs a counter before encoding the types, removed one wrong start_new_trace which started a trace too often! 
diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -512,7 +512,6 @@ def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type, orig_inpargs, memo): - metainterp_sd.jitlog.start_new_trace(metainterp_sd, None, type == "entry bridge") forget_optimization_info(loop.operations) forget_optimization_info(loop.inputargs) vinfo = jitdriver_sd.virtualizable_info diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py --- a/rpython/rlib/jitlog.py +++ b/rpython/rlib/jitlog.py @@ -188,6 +188,24 @@ for mark, in marks: print '%s = chr(%s)' % ('MARK_' + mark, hex(globals()['MARK_' + mark])) print 'MARK_JITLOG_END = chr(%s)' % hex(start) + for key,value in locals().items(): + if key.startswith("MP_"): + print '%s = (%s,"%s")' % (key, hex(value[0]), value[1]) + print 'SEM_TYPE_NAMES = {' + for key,value in locals().items(): + if key.startswith("MP_") and value[0] != 0: + print ' %s: "%s",' % (hex(value[0]), key[3:].lower()) + print '}' + +MP_STR = (0x0, "s") +MP_INT = (0x0, "i") + +# concrete parameters +MP_FILENAME = (0x1, "s") +MP_LINENO = (0x2, "i") +MP_INDEX = (0x4, "i") +MP_SCOPE = (0x8, "s") +MP_OPCODE = (0x10, "s") del marks del start @@ -391,7 +409,8 @@ for i, (semantic_type, generic_type) in enumerate(types): encoded_types.append(chr(semantic_type)) encoded_types.append(generic_type) - log._write_marked(MARK_INIT_MERGE_POINT, ''.join(encoded_types)) + count = encode_le_16bit(len(types)) + log._write_marked(MARK_INIT_MERGE_POINT, count + ''.join(encoded_types)) # the types have already been written encoded = encode_merge_point(log, self.common_prefix, values) From pypy.commits at gmail.com Tue Apr 26 11:29:25 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 08:29:25 -0700 (PDT) Subject: [pypy-commit] pypy remove-raisingops: More kills Message-ID: <571f8955.442cc20a.75921.ffffdcb7@mx.google.com> Author: Armin Rigo Branch: 
remove-raisingops Changeset: r83914:e4d43e895c35 Date: 2016-04-26 17:17 +0200 http://bitbucket.org/pypy/pypy/changeset/e4d43e895c35/ Log: More kills diff --git a/rpython/jit/codewriter/flatten.py b/rpython/jit/codewriter/flatten.py --- a/rpython/jit/codewriter/flatten.py +++ b/rpython/jit/codewriter/flatten.py @@ -103,7 +103,7 @@ self.seen_blocks = {} self.make_bytecode_block(self.graph.startblock) - def make_bytecode_block(self, block, handling_ovf=False): + def make_bytecode_block(self, block): if block.exits == (): self.make_return(block.inputargs) return @@ -117,15 +117,10 @@ # operations = block.operations for i, op in enumerate(operations): - if '_ovf' in op.opname: - if (len(block.exits) not in (2, 3) or - block.exitswitch is not c_last_exception): - raise Exception("detected a block containing ovfcheck()" - " but no OverflowError is caught, this" - " is not legal in jitted blocks") + assert '_ovf' not in op.opname # should not exist any more self.serialize_op(op) # - self.insert_exits(block, handling_ovf) + self.insert_exits(block) def make_return(self, args): if len(args) == 1: @@ -145,16 +140,16 @@ raise Exception("?") self.emitline("---") - def make_link(self, link, handling_ovf): + def make_link(self, link): if (link.target.exits == () and link.last_exception not in link.args and link.last_exc_value not in link.args): self.make_return(link.args) # optimization only return self.insert_renamings(link) - self.make_bytecode_block(link.target, handling_ovf) + self.make_bytecode_block(link.target) - def make_exception_link(self, link, handling_ovf): + def make_exception_link(self, link): # Like make_link(), but also introduces the 'last_exception' and # 'last_exc_value' as variables if needed. Also check if the link # is jumping directly to the re-raising exception block. 
@@ -162,52 +157,31 @@ assert link.last_exc_value is not None if link.target.operations == () and link.args == [link.last_exception, link.last_exc_value]: - if handling_ovf: - exc_data = self.cpu.rtyper.exceptiondata - ll_ovf = exc_data.get_standard_ll_exc_instance_by_class( - OverflowError) - c = Constant(ll_ovf, concretetype=lltype.typeOf(ll_ovf)) - self.emitline("raise", c) - else: - self.emitline("reraise") + self.emitline("reraise") self.emitline("---") return # done - self.make_link(link, handling_ovf) + self.make_link(link) - def insert_exits(self, block, handling_ovf=False): + def insert_exits(self, block): if len(block.exits) == 1: # A single link, fall-through link = block.exits[0] assert link.exitcase in (None, False, True) # the cases False or True should not really occur, but can show # up in the manually hacked graphs for generators... - self.make_link(link, handling_ovf) + self.make_link(link) # elif block.canraise: # An exception block. See test_exc_exitswitch in test_flatten.py # for an example of what kind of code this makes. index = -1 opname = block.operations[index].opname - if '_ovf' in opname: - # ovf checking operation as a lat thing, -live- should be - # one before it - line = self.popline() - self.emitline(opname[:7] + '_jump_if_ovf', - TLabel(block.exits[1]), *line[1:]) - assert len(block.exits) in (2, 3) - self.make_link(block.exits[0], False) - self.emitline(Label(block.exits[1])) - self.make_exception_link(block.exits[1], True) - if len(block.exits) == 3: - assert block.exits[2].exitcase is Exception - self.make_exception_link(block.exits[2], False) - return - else: - while True: - lastopname = block.operations[index].opname - if lastopname != '-live-': - break - index -= 1 + assert '_ovf' not in opname # should not exist any more + while True: + lastopname = block.operations[index].opname + if lastopname != '-live-': + break + index -= 1 assert block.exits[0].exitcase is None # is this always True? 
# if not self._include_all_exc_links: @@ -261,10 +235,10 @@ #if not livebefore: # self.emitline('-live-', TLabel(linkfalse)) # true path: - self.make_link(linktrue, handling_ovf) + self.make_link(linktrue) # false path: self.emitline(Label(linkfalse)) - self.make_link(linkfalse, handling_ovf) + self.make_link(linkfalse) # else: # A switch. @@ -287,7 +261,7 @@ switchdict) # emit the default path if block.exits[-1].exitcase == 'default': - self.make_link(block.exits[-1], handling_ovf) + self.make_link(block.exits[-1]) else: self.emitline("unreachable") self.emitline("---") @@ -301,7 +275,7 @@ # if the switched value doesn't match any case. self.emitline(Label(switch)) self.emitline('-live-') - self.make_link(switch, handling_ovf) + self.make_link(switch) def insert_renamings(self, link): renamings = {} diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -333,17 +333,6 @@ rewrite_op_float_gt = _rewrite_symmetric rewrite_op_float_ge = _rewrite_symmetric - def rewrite_op_int_add_ovf(self, op): - op0 = self._rewrite_symmetric(op) - op1 = SpaceOperation('-live-', [], None) - return [op1, op0] - - rewrite_op_int_mul_ovf = rewrite_op_int_add_ovf - - def rewrite_op_int_sub_ovf(self, op): - op1 = SpaceOperation('-live-', [], None) - return [op1, op] - def _noop_rewrite(self, op): return op @@ -518,23 +507,12 @@ # XXX some of the following functions should not become residual calls # but be really compiled - rewrite_op_int_floordiv_ovf_zer = _do_builtin_call - rewrite_op_int_floordiv_ovf = _do_builtin_call - rewrite_op_int_floordiv_zer = _do_builtin_call - rewrite_op_int_mod_ovf_zer = _do_builtin_call - rewrite_op_int_mod_ovf = _do_builtin_call - rewrite_op_int_mod_zer = _do_builtin_call - rewrite_op_int_lshift_ovf = _do_builtin_call rewrite_op_int_abs = _do_builtin_call rewrite_op_llong_abs = _do_builtin_call rewrite_op_llong_floordiv = 
_do_builtin_call - rewrite_op_llong_floordiv_zer = _do_builtin_call rewrite_op_llong_mod = _do_builtin_call - rewrite_op_llong_mod_zer = _do_builtin_call rewrite_op_ullong_floordiv = _do_builtin_call - rewrite_op_ullong_floordiv_zer = _do_builtin_call rewrite_op_ullong_mod = _do_builtin_call - rewrite_op_ullong_mod_zer = _do_builtin_call rewrite_op_gc_identityhash = _do_builtin_call rewrite_op_gc_id = _do_builtin_call rewrite_op_gc_pin = _do_builtin_call @@ -1499,7 +1477,6 @@ for _old, _new in [('bool_not', 'int_is_zero'), ('cast_bool_to_float', 'cast_int_to_float'), - ('int_add_nonneg_ovf', 'int_add_ovf'), ('keepalive', '-live-'), ('char_lt', 'int_lt'), @@ -1532,12 +1509,6 @@ return self.rewrite_operation(op1) ''' % (_old, _new)).compile() - def rewrite_op_int_neg_ovf(self, op): - op1 = SpaceOperation('int_sub_ovf', - [Constant(0, lltype.Signed), op.args[0]], - op.result) - return self.rewrite_operation(op1) - def rewrite_op_float_is_true(self, op): op1 = SpaceOperation('float_ne', [op.args[0], Constant(0.0, lltype.Float)], diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -243,45 +243,6 @@ return llop.jit_force_virtual(lltype.typeOf(inst), inst) -def _ll_2_int_floordiv_ovf_zer(x, y): - if y == 0: - raise ZeroDivisionError - return _ll_2_int_floordiv_ovf(x, y) - -def _ll_2_int_floordiv_ovf(x, y): - # intentionally not short-circuited to produce only one guard - # and to remove the check fully if one of the arguments is known - if (x == -sys.maxint - 1) & (y == -1): - raise OverflowError - return llop.int_floordiv(lltype.Signed, x, y) - -def _ll_2_int_floordiv_zer(x, y): - if y == 0: - raise ZeroDivisionError - return llop.int_floordiv(lltype.Signed, x, y) - -def _ll_2_int_mod_ovf_zer(x, y): - if y == 0: - raise ZeroDivisionError - return _ll_2_int_mod_ovf(x, y) - -def _ll_2_int_mod_ovf(x, y): - #see comment in _ll_2_int_floordiv_ovf - if (x == 
-sys.maxint - 1) & (y == -1): - raise OverflowError - return llop.int_mod(lltype.Signed, x, y) - -def _ll_2_int_mod_zer(x, y): - if y == 0: - raise ZeroDivisionError - return llop.int_mod(lltype.Signed, x, y) - -def _ll_2_int_lshift_ovf(x, y): - result = x << y - if (result >> y) != x: - raise OverflowError - return result - def _ll_1_int_abs(x): # this version doesn't branch mask = x >> (LONG_BIT - 1) @@ -455,48 +416,21 @@ def _ll_2_llong_floordiv(xll, yll): return llop.llong_floordiv(lltype.SignedLongLong, xll, yll) -def _ll_2_llong_floordiv_zer(xll, yll): - if yll == 0: - raise ZeroDivisionError - return llop.llong_floordiv(lltype.SignedLongLong, xll, yll) - def _ll_2_llong_mod(xll, yll): return llop.llong_mod(lltype.SignedLongLong, xll, yll) -def _ll_2_llong_mod_zer(xll, yll): - if yll == 0: - raise ZeroDivisionError - return llop.llong_mod(lltype.SignedLongLong, xll, yll) - def _ll_2_ullong_floordiv(xll, yll): return llop.ullong_floordiv(lltype.UnsignedLongLong, xll, yll) -def _ll_2_ullong_floordiv_zer(xll, yll): - if yll == 0: - raise ZeroDivisionError - return llop.ullong_floordiv(lltype.UnsignedLongLong, xll, yll) - def _ll_2_ullong_mod(xll, yll): return llop.ullong_mod(lltype.UnsignedLongLong, xll, yll) -def _ll_2_ullong_mod_zer(xll, yll): - if yll == 0: - raise ZeroDivisionError - return llop.ullong_mod(lltype.UnsignedLongLong, xll, yll) - def _ll_2_uint_mod(xll, yll): return llop.uint_mod(lltype.Unsigned, xll, yll) # in the following calls to builtins, the JIT is allowed to look inside: inline_calls_to = [ - ('int_floordiv_ovf_zer', [lltype.Signed, lltype.Signed], lltype.Signed), - ('int_floordiv_ovf', [lltype.Signed, lltype.Signed], lltype.Signed), - ('int_floordiv_zer', [lltype.Signed, lltype.Signed], lltype.Signed), - ('int_mod_ovf_zer', [lltype.Signed, lltype.Signed], lltype.Signed), - ('int_mod_ovf', [lltype.Signed, lltype.Signed], lltype.Signed), - ('int_mod_zer', [lltype.Signed, lltype.Signed], lltype.Signed), - ('int_lshift_ovf', [lltype.Signed, 
lltype.Signed], lltype.Signed), ('int_abs', [lltype.Signed], lltype.Signed), ('ll_math.ll_math_sqrt', [lltype.Float], lltype.Float), ] diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -1063,95 +1063,6 @@ def op_track_alloc_stop(self, addr): checkadr(addr) - # ____________________________________________________________ - # Overflow-detecting variants - - def op_int_neg_ovf(self, x): - assert is_valid_int(x) - try: - return ovfcheck(-x) - except OverflowError: - self.make_llexception() - - def op_int_abs_ovf(self, x): - assert is_valid_int(x) - try: - return ovfcheck(abs(x)) - except OverflowError: - self.make_llexception() - - def op_int_lshift_ovf(self, x, y): - assert is_valid_int(x) - assert is_valid_int(y) - try: - return ovfcheck(x << y) - except OverflowError: - self.make_llexception() - - def _makefunc2(fn, operator, xtype, ytype=None): - import sys - d = sys._getframe(1).f_locals - if ytype is None: - ytype = xtype - if '_ovf' in fn: - checkfn = 'ovfcheck' - elif fn.startswith('op_int_'): - checkfn = 'intmask' - else: - checkfn = '' - if operator == '//': - code = '''r = %(checkfn)s(x // y) - if x^y < 0 and x%%y != 0: - r += 1 - return r - ''' % locals() - elif operator == '%': - ## overflow check on % does not work with emulated int - code = '''%(checkfn)s(x // y) - r = x %% y - if x^y < 0 and x%%y != 0: - r -= y - return r - ''' % locals() - else: - code = 'return %(checkfn)s(x %(operator)s y)' % locals() - exec py.code.Source(""" - def %(fn)s(self, x, y): - assert isinstance(x, %(xtype)s) - assert isinstance(y, %(ytype)s) - try: - %(code)s - except (OverflowError, ValueError, ZeroDivisionError): - self.make_llexception() - """ % locals()).compile() in globals(), d - - _makefunc2('op_int_add_ovf', '+', '(int, long, llmemory.AddressOffset)') - _makefunc2('op_int_mul_ovf', '*', '(int, long, llmemory.AddressOffset)', '(int, long)') - _makefunc2('op_int_sub_ovf', '-', 
'(int, long)') - _makefunc2('op_int_floordiv_ovf', '//', '(int, long)') # XXX negative args - _makefunc2('op_int_floordiv_zer', '//', '(int, long)') # can get off-by-one - _makefunc2('op_int_floordiv_ovf_zer', '//', '(int, long)') # (see op_int_floordiv) - _makefunc2('op_int_mod_ovf', '%', '(int, long)') - _makefunc2('op_int_mod_zer', '%', '(int, long)') - _makefunc2('op_int_mod_ovf_zer', '%', '(int, long)') - - _makefunc2('op_uint_floordiv_zer', '//', 'r_uint') - _makefunc2('op_uint_mod_zer', '%', 'r_uint') - - _makefunc2('op_llong_floordiv_zer', '//', 'r_longlong') - _makefunc2('op_llong_mod_zer', '%', 'r_longlong') - - _makefunc2('op_ullong_floordiv_zer', '//', 'r_ulonglong') - _makefunc2('op_ullong_mod_zer', '%', 'r_ulonglong') - - _makefunc2('op_lllong_floordiv_zer', '//', 'r_longlonglong') - _makefunc2('op_lllong_mod_zer', '%', 'r_longlonglong') - - def op_int_add_nonneg_ovf(self, x, y): - if isinstance(y, int): - assert y >= 0 - return self.op_int_add_ovf(x, y) - def op_int_is_true(self, x): # special case if type(x) is CDefinedIntSymbolic: From pypy.commits at gmail.com Tue Apr 26 11:29:27 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 08:29:27 -0700 (PDT) Subject: [pypy-commit] pypy remove-raisingops: progress, and goal Message-ID: <571f8957.47afc20a.2f58b.ffffe377@mx.google.com> Author: Armin Rigo Branch: remove-raisingops Changeset: r83915:d29372e66a6e Date: 2016-04-26 17:29 +0200 http://bitbucket.org/pypy/pypy/changeset/d29372e66a6e/ Log: progress, and goal diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -3,7 +3,8 @@ from rpython.annotator import model as annmodel from rpython.flowspace.operation import op_appendices from rpython.rlib import objectmodel, jit -from rpython.rlib.rarithmetic import intmask, r_int, r_longlong +from rpython.rlib.rarithmetic import intmask, longlongmask, r_int, r_longlong +from rpython.rlib.rarithmetic import r_uint, r_ulonglong 
from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, Bool, Float, Char, UniChar, UnsignedLongLong, SignedLongLong, build_number, Number, @@ -99,10 +100,11 @@ if hop.s_result.unsigned: raise TyperError("forbidden uint_abs_ovf") else: - vlist = hop.inputargs(self) + [v_arg] = hop.inputargs(self) hop.has_implicit_exception(OverflowError) # record we know about it hop.exception_is_here() - return hop.genop(self.opprefix + 'abs_ovf', vlist, resulttype=self) + llfunc = globals()['ll_' + self.opprefix + 'abs_ovf'] + return hop.gendirectcall(llfunc, v_arg) def rtype_invert(self, hop): self = self.as_int @@ -127,10 +129,12 @@ hop.exception_cannot_occur() return self.rtype_neg(hop) else: - vlist = hop.inputargs(self) + [v_arg] = hop.inputargs(self) hop.has_implicit_exception(OverflowError) # record we know about it hop.exception_is_here() - return hop.genop(self.opprefix + 'neg_ovf', vlist, resulttype=self) + llfunc = globals()['ll_' + self.opprefix + 'sub_ovf'] + c_zero = hop.inputconst(self.lowleveltype, 0) + return hop.gendirectcall(llfunc, c_zero, v_arg) def rtype_pos(self, hop): self = self.as_int @@ -357,6 +361,8 @@ INT_BITS_1 = r_int.BITS - 1 LLONG_BITS_1 = r_longlong.BITS - 1 +INT_MIN = int(-(1 << INT_BITS_1)) +LLONG_MIN = r_longlong(-(1 << LLONG_BITS_1)) def ll_correct_int_floordiv(x, y, r): p = r * y @@ -381,6 +387,31 @@ return r + (y & (u >> LLONG_BITS_1)) + at jit.oopspec("sub_ovf") +def ll_int_sub_ovf(x, y): + r = intmask(r_uint(x) - r_uint(y)) + if r^x >= 0 or r^~y >= 0: + return r + raise OverflowError("integer subtraction") + + at jit.oopspec("sub_ovf") +def ll_llong_sub_ovf(x, y): + r = longlongmask(r_ulonglong(x) - r_ulonglong(y)) + if r^x >= 0 or r^~y >= 0: + return r + raise OverflowError("longlong subtraction") + +def ll_int_abs_ovf(x): + if x == INT_MIN: + raise OverflowError + return abs(x) + +def ll_llong_abs_ovf(x): + if x == LLONG_MIN: + raise OverflowError + return abs(x) + + #Helper 
functions for comparisons def _rtype_compare_template(hop, func): From pypy.commits at gmail.com Tue Apr 26 12:49:39 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 09:49:39 -0700 (PDT) Subject: [pypy-commit] pypy remove-raisingops: Port the logic to rint.py Message-ID: <571f9c23.6a70c20a.745cb.ffff8d4b@mx.google.com> Author: Armin Rigo Branch: remove-raisingops Changeset: r83916:90b064ce2c83 Date: 2016-04-26 18:49 +0200 http://bitbucket.org/pypy/pypy/changeset/90b064ce2c83/ Log: Port the logic to rint.py diff --git a/rpython/rtyper/raisingops.py b/rpython/rtyper/raisingops.py deleted file mode 100644 --- a/rpython/rtyper/raisingops.py +++ /dev/null @@ -1,295 +0,0 @@ -import sys -from rpython.rlib.rarithmetic import r_longlong, r_uint, intmask -from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.lltypesystem.lltype import Signed, SignedLongLong, \ - UnsignedLongLong - -#XXX original SIGNED_RIGHT_SHIFT_ZERO_FILLS not taken into account -#XXX assuming HAVE_LONG_LONG (int_mul_ovf) -#XXX should int_mod and int_floordiv return an intmask(...) instead? 
- -LONG_MAX = sys.maxint -LONG_MIN = -sys.maxint-1 - -LLONG_MAX = r_longlong(2 ** (r_longlong.BITS-1) - 1) -LLONG_MIN = -LLONG_MAX-1 - -def int_floordiv_zer(x, y): - '''#define OP_INT_FLOORDIV_ZER(x,y,r,err) \ - if ((y)) { OP_INT_FLOORDIV(x,y,r,err); } \ - else FAIL_ZER(err, "integer division") - ''' - if y: - return llop.int_floordiv(Signed, x, y) - else: - raise ZeroDivisionError("integer division") - -def uint_floordiv_zer(x, y): - '''#define OP_UINT_FLOORDIV_ZER(x,y,r,err) \ - if ((y)) { OP_UINT_FLOORDIV(x,y,r,err); } \ - else FAIL_ZER(err, "unsigned integer division") - ''' - if y: - return x / y - else: - raise ZeroDivisionError("unsigned integer division") - -def llong_floordiv_zer(x, y): - '''#define OP_LLONG_FLOORDIV_ZER(x,y,r) \ - if ((y)) { OP_LLONG_FLOORDIV(x,y,r); } \ - else FAIL_ZER("integer division") - ''' - if y: - return llop.llong_floordiv(SignedLongLong, x, y) - else: - raise ZeroDivisionError("integer division") - -def ullong_floordiv_zer(x, y): - '''#define OP_ULLONG_FLOORDIV_ZER(x,y,r) \ - if ((y)) { OP_ULLONG_FLOORDIV(x,y,r); } \ - else FAIL_ZER("unsigned integer division") - ''' - if y: - return llop.llong_floordiv(UnsignedLongLong, x, y) - else: - raise ZeroDivisionError("unsigned integer division") - - -def int_neg_ovf(x): - if x == LONG_MIN: - raise OverflowError("integer negate") - return -x - -def llong_neg_ovf(x): - if x == LLONG_MIN: - raise OverflowError("integer negate") - return -x - -def int_abs_ovf(x): - if x == LONG_MIN: - raise OverflowError("integer absolute") - if x < 0: - return -x - else: - return x - -def llong_abs_ovf(x): - if x == LLONG_MIN: - raise OverflowError("integer absolute") - if x < 0: - return -x - else: - return x - -def int_add_ovf(x, y): - '''#define OP_INT_ADD_OVF(x,y,r,err) \ - OP_INT_ADD(x,y,r,err); \ - if ((r^(x)) >= 0 || (r^(y)) >= 0); \ - else FAIL_OVF(err, "integer addition") - ''' - r = intmask(r_uint(x) + r_uint(y)) - if r^x >= 0 or r^y >= 0: - return r - else: - raise OverflowError("integer 
addition") - -def int_add_nonneg_ovf(x, y): - ''' - OP_INT_ADD(x,y,r); \ - if (r >= (x)); \ - else FAIL_OVF("integer addition") - ''' - r = intmask(r_uint(x) + r_uint(y)) - if r >= x: - return r - else: - raise OverflowError("integer addition") - -def int_sub_ovf(x, y): - '''#define OP_INT_SUB_OVF(x,y,r,err) \ - OP_INT_SUB(x,y,r,err); \ - if ((r^(x)) >= 0 || (r^~(y)) >= 0); \ - else FAIL_OVF(err, "integer subtraction") - ''' - r = intmask(r_uint(x) - r_uint(y)) - if r^x >= 0 or r^~y >= 0: - return r - else: - raise OverflowError("integer subtraction") - -def int_lshift_ovf(x, y): - '''#define OP_INT_LSHIFT_OVF(x,y,r,err) \ - OP_INT_LSHIFT(x,y,r,err); \ - if ((x) != Py_ARITHMETIC_RIGHT_SHIFT(long, r, (y))) \ - FAIL_OVF(err, "x<= 0) { OP_INT_RSHIFT(x,y,r,err); } \ - else FAIL_VAL(err, "negative shift count") - ''' - if y >= 0: - return _Py_ARITHMETIC_RIGHT_SHIFT(x, y) - else: - raise ValueError("negative shift count") - -def int_lshift_val(x, y): - '''#define OP_INT_LSHIFT_VAL(x,y,r,err) \ - if ((y) >= 0) { OP_INT_LSHIFT(x,y,r,err); } \ - else FAIL_VAL(err, "negative shift count") - ''' - if y >= 0: - return x << y - else: - raise ValueError("negative shift count") - -def int_lshift_ovf_val(x, y): - '''#define OP_INT_LSHIFT_OVF_VAL(x,y,r,err) \ - if ((y) >= 0) { OP_INT_LSHIFT_OVF(x,y,r,err); } \ - else FAIL_VAL(err, "negative shift count") - ''' - if y >= 0: - return int_lshift_ovf(x, y) - else: - raise ValueError("negative shift count") - -def int_floordiv_ovf(x, y): - '''#define OP_INT_FLOORDIV_OVF(x,y,r,err) \ - if ((y) == -1 && (x) < 0 && ((unsigned long)(x) << 1) == 0) \ - FAIL_OVF(err, "integer division"); \ - OP_INT_FLOORDIV(x,y,r,err) - ''' - if y == -1 and x < 0 and (r_uint(x) << 1) == 0: - raise OverflowError("integer division") - else: - return llop.int_floordiv(Signed, x, y) - -def int_floordiv_ovf_zer(x, y): - '''#define OP_INT_FLOORDIV_OVF_ZER(x,y,r,err) \ - if ((y)) { OP_INT_FLOORDIV_OVF(x,y,r,err); } \ - else FAIL_ZER(err, "integer division") - ''' - 
if y: - return int_floordiv_ovf(x, y) - else: - raise ZeroDivisionError("integer division") - -def int_mod_ovf(x, y): - '''#define OP_INT_MOD_OVF(x,y,r,err) \ - if ((y) == -1 && (x) < 0 && ((unsigned long)(x) << 1) == 0) \ - FAIL_OVF(err, "integer modulo"); \ - OP_INT_MOD(x,y,r,err) - ''' - if y == -1 and x < 0 and (r_uint(x) << 1) == 0: - raise OverflowError("integer modulo") - else: - return llop.int_mod(Signed, x, y) - -def int_mod_zer(x, y): - '''#define OP_INT_MOD_ZER(x,y,r,err) \ - if ((y)) { OP_INT_MOD(x,y,r,err); } \ - else FAIL_ZER(err, "integer modulo") - ''' - if y: - return llop.int_mod(Signed, x, y) - else: - raise ZeroDivisionError("integer modulo") - -def uint_mod_zer(x, y): - '''#define OP_UINT_MOD_ZER(x,y,r,err) \ - if ((y)) { OP_UINT_MOD(x,y,r,err); } \ - else FAIL_ZER(err, "unsigned integer modulo") - ''' - if y: - return x % y - else: - raise ZeroDivisionError("unsigned integer modulo") - -def int_mod_ovf_zer(x, y): - '''#define OP_INT_MOD_OVF_ZER(x,y,r,err) \ - if ((y)) { OP_INT_MOD_OVF(x,y,r,err); } \ - else FAIL_ZER(err, "integer modulo") - ''' - if y: - return int_mod_ovf(x, y) - else: - raise ZeroDivisionError("integer modulo") - -def llong_mod_zer(x, y): - '''#define OP_LLONG_MOD_ZER(x,y,r) \ - if ((y)) { OP_LLONG_MOD(x,y,r); } \ - else FAIL_ZER("integer modulo") - ''' - if y: - return llop.int_mod(SignedLongLong, x, y) - else: - raise ZeroDivisionError("integer modulo") - -# Helpers... - -def _Py_ARITHMETIC_RIGHT_SHIFT(i, j): - ''' -// Py_ARITHMETIC_RIGHT_SHIFT -// C doesn't define whether a right-shift of a signed integer sign-extends -// or zero-fills. Here a macro to force sign extension: -// Py_ARITHMETIC_RIGHT_SHIFT(TYPE, I, J) -// Return I >> J, forcing sign extension. -// Requirements: -// I is of basic signed type TYPE (char, short, int, long, or long long). -// TYPE is one of char, short, int, long, or long long, although long long -// must not be used except on platforms that support it. 
-// J is an integer >= 0 and strictly less than the number of bits in TYPE -// (because C doesn't define what happens for J outside that range either). -// Caution: -// I may be evaluated more than once. - -#ifdef SIGNED_RIGHT_SHIFT_ZERO_FILLS - #define Py_ARITHMETIC_RIGHT_SHIFT(TYPE, I, J) \ - ((I) < 0 ? ~((~(unsigned TYPE)(I)) >> (J)) : (I) >> (J)) -#else - #define Py_ARITHMETIC_RIGHT_SHIFT(TYPE, I, J) ((I) >> (J)) -#endif - ''' - return i >> j - -#XXX some code from src/int.h seems missing -#def int_mul_ovf(x, y): #HAVE_LONG_LONG version -# '''{ \ -# PY_LONG_LONG lr = (PY_LONG_LONG)(x) * (PY_LONG_LONG)(y); \ -# r = (long)lr; \ -# if ((PY_LONG_LONG)r == lr); \ -# else FAIL_OVF(err, "integer multiplication"); \ -# } -# ''' -# lr = r_longlong(x) * r_longlong(y); -# r = intmask(lr) -# if r_longlong(r) == lr: -# return r -# else: -# raise OverflowError("integer multiplication") - -#not HAVE_LONG_LONG version -def int_mul_ovf(a, b): #long a, long b, long *longprod): - longprod = a * b - doubleprod = float(a) * float(b) - doubled_longprod = float(longprod) - - # Fast path for normal case: small multiplicands, and no info is lost in either method. - if doubled_longprod == doubleprod: - return longprod - - # Somebody somewhere lost info. Close enough, or way off? Note - # that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0). - # The difference either is or isn't significant compared to the - # true value (of which doubleprod is a good approximation). 
- # absdiff/absprod <= 1/32 iff 32 * absdiff <= absprod -- 5 good bits is "close enough" - if 32.0 * abs(doubled_longprod - doubleprod) <= abs(doubleprod): - return longprod - - raise OverflowError("integer multiplication") diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -4,7 +4,7 @@ from rpython.flowspace.operation import op_appendices from rpython.rlib import objectmodel, jit from rpython.rlib.rarithmetic import intmask, longlongmask, r_int, r_longlong -from rpython.rlib.rarithmetic import r_uint, r_ulonglong +from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlonglong from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, Bool, Float, Char, UniChar, UnsignedLongLong, SignedLongLong, build_number, Number, @@ -12,6 +12,8 @@ from rpython.rtyper.rfloat import FloatRepr from rpython.rtyper.rmodel import inputconst, log from rpython.tool.pairtype import pairtype +from rpython.rtyper.lltypesystem.lloperation import llop + class IntegerRepr(FloatRepr): def __init__(self, lowleveltype, opprefix): @@ -100,11 +102,7 @@ if hop.s_result.unsigned: raise TyperError("forbidden uint_abs_ovf") else: - [v_arg] = hop.inputargs(self) - hop.has_implicit_exception(OverflowError) # record we know about it - hop.exception_is_here() - llfunc = globals()['ll_' + self.opprefix + 'abs_ovf'] - return hop.gendirectcall(llfunc, v_arg) + return _rtype_call_helper(hop, 'abs_ovf') def rtype_invert(self, hop): self = self.as_int @@ -129,12 +127,7 @@ hop.exception_cannot_occur() return self.rtype_neg(hop) else: - [v_arg] = hop.inputargs(self) - hop.has_implicit_exception(OverflowError) # record we know about it - hop.exception_is_here() - llfunc = globals()['ll_' + self.opprefix + 'sub_ovf'] - c_zero = hop.inputconst(self.lowleveltype, 0) - return hop.gendirectcall(llfunc, c_zero, v_arg) + return _rtype_call_helper(hop, 'neg_ovf') def rtype_pos(self, hop): 
self = self.as_int @@ -226,28 +219,28 @@ hop = hop.copy() hop.swap_fst_snd_args() func = 'add_nonneg_ovf' - return _rtype_template(hop, func) + return _rtype_call_helper(hop, func) def rtype_sub(_, hop): return _rtype_template(hop, 'sub') rtype_inplace_sub = rtype_sub def rtype_sub_ovf(_, hop): - return _rtype_template(hop, 'sub_ovf') + return _rtype_call_helper(hop, 'sub_ovf') def rtype_mul(_, hop): return _rtype_template(hop, 'mul') rtype_inplace_mul = rtype_mul def rtype_mul_ovf(_, hop): - return _rtype_template(hop, 'mul_ovf') + return _rtype_call_helper(hop, 'mul_ovf') def rtype_floordiv(_, hop): - return _rtype_template(hop, 'floordiv', [ZeroDivisionError]) + return _rtype_call_helper(hop, 'floordiv', [ZeroDivisionError]) rtype_inplace_floordiv = rtype_floordiv def rtype_floordiv_ovf(_, hop): - return _rtype_template(hop, 'floordiv_ovf', [ZeroDivisionError]) + return _rtype_call_helper(hop, 'floordiv_ovf', [ZeroDivisionError]) # turn 'div' on integers into 'floordiv' rtype_div = rtype_floordiv @@ -257,11 +250,11 @@ # 'def rtype_truediv' is delegated to the superclass FloatRepr def rtype_mod(_, hop): - return _rtype_template(hop, 'mod', [ZeroDivisionError]) + return _rtype_call_helper(hop, 'mod', [ZeroDivisionError]) rtype_inplace_mod = rtype_mod def rtype_mod_ovf(_, hop): - return _rtype_template(hop, 'mod_ovf', [ZeroDivisionError]) + return _rtype_call_helper(hop, 'mod_ovf', [ZeroDivisionError]) def rtype_xor(_, hop): return _rtype_template(hop, 'xor') @@ -280,7 +273,7 @@ rtype_inplace_lshift = rtype_lshift def rtype_lshift_ovf(_, hop): - return _rtype_template(hop, 'lshift_ovf') + return _rtype_call_helper(hop, 'lshift_ovf') def rtype_rshift(_, hop): return _rtype_template(hop, 'rshift') @@ -310,17 +303,13 @@ #Helper functions -def _rtype_template(hop, func, implicit_excs=[]): - if func.endswith('_ovf'): - if hop.s_result.unsigned: - raise TyperError("forbidden unsigned " + func) - else: - hop.has_implicit_exception(OverflowError) - - for implicit_exc in 
implicit_excs: - if hop.has_implicit_exception(implicit_exc): - appendix = op_appendices[implicit_exc] - func += '_' + appendix +def _rtype_template(hop, func): + """Write a simple operation implementing the given 'func'. + It must be an operation that cannot raise. + """ + if '_ovf' in func or (func.startswith(('mod', 'floordiv')) + and not hop.s_result.unsigned): + raise TyperError("%r should not be used here any more" % (func,)) r_result = hop.r_result if r_result.lowleveltype == Bool: @@ -332,74 +321,269 @@ else: repr2 = repr vlist = hop.inputargs(repr, repr2) - hop.exception_is_here() + hop.exception_cannot_occur() prefix = repr.opprefix - v_res = hop.genop(prefix+func, vlist, resulttype=repr) - bothnonneg = hop.args_s[0].nonneg and hop.args_s[1].nonneg - if prefix in ('int_', 'llong_') and not bothnonneg: - - # cpython, and rpython, assumed that integer division truncates - # towards -infinity. however, in C99 and most (all?) other - # backends, integer division truncates towards 0. so assuming - # that, we call a helper function that applies the necessary - # correction in the right cases. - - op = func.split('_', 1)[0] - - if op == 'floordiv': - llfunc = globals()['ll_correct_' + prefix + 'floordiv'] - v_res = hop.gendirectcall(llfunc, vlist[0], vlist[1], v_res) - elif op == 'mod': - llfunc = globals()['ll_correct_' + prefix + 'mod'] - v_res = hop.gendirectcall(llfunc, vlist[1], v_res) - v_res = hop.llops.convertvar(v_res, repr, r_result) return v_res +def _rtype_call_helper(hop, func, implicit_excs=[]): + """Write a call to a helper implementing the given 'func'. + It can raise OverflowError if 'func' ends with '_ovf'. + Other possible exceptions can be specified in 'implicit_excs'. 
+ """ + any_implicit_exception = False + if func.endswith('_ovf'): + if hop.s_result.unsigned: + raise TyperError("forbidden unsigned " + func) + else: + hop.has_implicit_exception(OverflowError) + any_implicit_exception = True + + for implicit_exc in implicit_excs: + if hop.has_implicit_exception(implicit_exc): + appendix = op_appendices[implicit_exc] + func += '_' + appendix + any_implicit_exception = True + + if not any_implicit_exception: + if not func.startswith(('mod', 'floordiv')): + return _rtype_template(hop, func) + if hop.s_result.unsigned: + return _rtype_template(hop, func) + + repr = hop.r_result + assert repr.lowleveltype != Bool + if func in ('abs_ovf', 'neg_ovf'): + vlist = hop.inputargs(repr) + else: + if func.startswith(('lshift', 'rshift')): + vlist = hop.inputargs(repr, signed_repr) + else: + vlist = hop.inputargs(repr, repr) + if any_implicit_exception: + hop.exception_is_here() + else: + hop.exception_cannot_occur() + + llfunc = globals()['ll_' + repr.opprefix + func] + v_result = hop.gendirectcall(llfunc, *vlist) + assert v_result.concretetype == repr.lowleveltype + return v_result + + INT_BITS_1 = r_int.BITS - 1 LLONG_BITS_1 = r_longlong.BITS - 1 +LLLONG_BITS_1 = r_longlonglong.BITS - 1 INT_MIN = int(-(1 << INT_BITS_1)) LLONG_MIN = r_longlong(-(1 << LLONG_BITS_1)) -def ll_correct_int_floordiv(x, y, r): + +# ---------- floordiv ---------- + +def ll_int_floordiv(x, y): + # Python, and RPython, assume that integer division truncates + # towards -infinity. However, in C, integer division truncates + # towards 0. So assuming that, we need to apply a correction + # in the right cases. 
+ r = llop.int_floordiv(Signed, x, y) # <= truncates like in C p = r * y if y < 0: u = p - x else: u = x - p return r + (u >> INT_BITS_1) -def ll_correct_llong_floordiv(x, y, r): +def ll_int_floordiv_zer(x, y): + if y == 0: + raise ZeroDivisionError("integer division") + return ll_int_floordiv(x, y) + +def ll_uint_floordiv_zer(x, y): + if y == 0: + raise ZeroDivisionError("unsigned integer division") + return llop.uint_floordiv(Unsigned, x, y) + +def ll_int_floordiv_ovf(x, y): + # JIT: intentionally not short-circuited to produce only one guard + # and to remove the check fully if one of the arguments is known + if (x == -sys.maxint - 1) & (y == -1): + raise OverflowError("integer division") + return ll_int_floordiv(x, y) + +def ll_int_floordiv_ovf_zer(x, y): + if y == 0: + raise ZeroDivisionError("integer division") + return ll_int_floordiv_ovf(x, y) + +def ll_llong_floordiv(x, y): + r = llop.llong_floordiv(SignedLongLong, x, y) # <= truncates like in C p = r * y if y < 0: u = p - x else: u = x - p return r + (u >> LLONG_BITS_1) -def ll_correct_int_mod(y, r): +def ll_llong_floordiv_zer(x, y): + if y == 0: + raise ZeroDivisionError("longlong division") + return ll_llong_floordiv(x, y) + +def ll_ullong_floordiv_zer(x, y): + if y == 0: + raise ZeroDivisionError("unsigned longlong division") + return llop.ullong_floordiv(UnsignedLongLong, x, y) + +def ll_lllong_floordiv(x, y): + r = llop.lllong_floordiv(SignedLongLongLong, x, y) # <= truncates like in C + p = r * y + if y < 0: u = p - x + else: u = x - p + return r + (u >> LLLONG_BITS_1) + +def ll_lllong_floordiv_zer(x, y): + if y == 0: + raise ZeroDivisionError("longlonglong division") + return ll_lllong_floordiv(x, y) + + +# ---------- mod ---------- + +def ll_int_mod(x, y): + r = llop.int_mod(Signed, x, y) # <= truncates like in C if y < 0: u = -r else: u = r return r + (y & (u >> INT_BITS_1)) -def ll_correct_llong_mod(y, r): +def ll_int_mod_zer(x, y): + if y == 0: + raise ZeroDivisionError + return ll_int_mod(x, 
y) + +def ll_uint_mod_zer(x, y): + if y == 0: + raise ZeroDivisionError + return llop.uint_mod(Unsigned, x, y) + +def ll_int_mod_ovf(x, y): + # see comment in ll_int_floordiv_ovf + if (x == -sys.maxint - 1) & (y == -1): + raise OverflowError + return ll_int_mod(x, y) + +def ll_int_mod_ovf_zer(x, y): + if y == 0: + raise ZeroDivisionError + return ll_int_mod_ovf(x, y) + +def ll_llong_mod(x, y): + r = llop.llong_mod(SignedLongLong, x, y) # <= truncates like in C if y < 0: u = -r else: u = r return r + (y & (u >> LLONG_BITS_1)) +def ll_llong_mod_zer(x, y): + if y == 0: + raise ZeroDivisionError + return ll_llong_mod(x, y) + +def ll_ullong_mod_zer(x, y): + if y == 0: + raise ZeroDivisionError + return llop.ullong_mod(UnsignedLongLong, x, y) + +def ll_lllong_mod(x, y): + r = llop.lllong_mod(SignedLongLongLong, x, y) # <= truncates like in C + if y < 0: u = -r + else: u = r + return r + (y & (u >> LLLONG_BITS_1)) + +def ll_lllong_mod_zer(x, y): + if y == 0: + raise ZeroDivisionError + return ll_lllong_mod(x, y) + + +# ---------- add, sub, mul ---------- + + at jit.oopspec("add_ovf") +def ll_int_add_ovf(x, y): + r = intmask(r_uint(x) + r_uint(y)) + if r^x < 0 and r^y < 0: + raise OverflowError("integer addition") + return r + + at jit.oopspec("add_ovf") +def ll_int_add_nonneg_ovf(x, y): # y can be assumed >= 0 + r = intmask(r_uint(x) + r_uint(y)) + if r < x: + raise OverflowError("integer addition") + return r @jit.oopspec("sub_ovf") def ll_int_sub_ovf(x, y): r = intmask(r_uint(x) - r_uint(y)) - if r^x >= 0 or r^~y >= 0: - return r - raise OverflowError("integer subtraction") + if r^x < 0 and r^~y < 0: + raise OverflowError("integer subtraction") + return r @jit.oopspec("sub_ovf") def ll_llong_sub_ovf(x, y): r = longlongmask(r_ulonglong(x) - r_ulonglong(y)) - if r^x >= 0 or r^~y >= 0: + if r^x < 0 and r^~y < 0: + raise OverflowError("longlong subtraction") + return r + + at jit.oopspec("mul_ovf") +def ll_int_mul_ovf(a, b): + if INT_BITS_1 < LLONG_BITS_1: + rr = 
r_longlong(a) * r_longlong(b) + r = intmask(rr) + if r_longlong(r) != rr: + raise OverflowError("integer multiplication") return r - raise OverflowError("longlong subtraction") + else: + longprod = intmask(a * b) + doubleprod = float(a) * float(b) + doubled_longprod = float(longprod) + + # Fast path for normal case: small multiplicands, and no info + # is lost in either method. + if doubled_longprod == doubleprod: + return longprod + + # Somebody somewhere lost info. Close enough, or way off? Note + # that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0). + # The difference either is or isn't significant compared to the + # true value (of which doubleprod is a good approximation). + # absdiff/absprod <= 1/32 iff 32 * absdiff <= absprod -- 5 good + # bits is "close enough" + if 32.0 * abs(doubled_longprod - doubleprod) <= abs(doubleprod): + return longprod + + raise OverflowError("integer multiplication") + + +# ---------- lshift, neg, abs ---------- + +def ll_int_lshift_ovf(x, y): + result = x << y + if (result >> y) != x: + raise OverflowError("x< Author: Armin Rigo Branch: Changeset: r83917:5c74afbd7d74 Date: 2016-04-26 21:07 +0200 http://bitbucket.org/pypy/pypy/changeset/5c74afbd7d74/ Log: Test and fix for unicode_dealloc(). diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -48,6 +48,9 @@ @cpython_api([PyObject], lltype.Void) def PyObject_dealloc(space, obj): + # This frees an object after its refcount dropped to zero, so we + # assert that it is really zero here. 
+ assert obj.c_ob_refcnt == 0 pto = obj.c_ob_type obj_voidp = rffi.cast(rffi.VOIDP, obj) generic_cpy_call(space, pto.c_tp_free, obj_voidp) diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -85,11 +85,10 @@ @cpython_api([PyObject], lltype.Void, header=None) def unicode_dealloc(space, py_obj): py_unicode = rffi.cast(PyUnicodeObject, py_obj) + Py_DecRef(space, py_unicode.c_defenc) if py_unicode.c_str: lltype.free(py_unicode.c_str, flavor="raw") from pypy.module.cpyext.object import PyObject_dealloc - if py_unicode.c_defenc: - PyObject_dealloc(space, py_unicode.c_defenc) PyObject_dealloc(space, py_obj) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) From pypy.commits at gmail.com Tue Apr 26 15:47:47 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 26 Apr 2016 12:47:47 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: merge default into branch Message-ID: <571fc5e3.d3161c0a.eb71.ffffac52@mx.google.com> Author: mattip Branch: cpyext-for-merge Changeset: r83918:ae02014e8547 Date: 2016-04-26 22:44 +0300 http://bitbucket.org/pypy/pypy/changeset/ae02014e8547/ Log: merge default into branch diff too long, truncating to 2000 out of 2734 lines diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -5,15 +5,23 @@ overly detailed -1. check out the branch vendor/stdlib +0. make sure your working dir is clean +1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k) + or create branch vendor/stdlib-3-* 2. upgrade the files there + 2a. remove lib-python/2.7/ or lib-python/3/ + 2b. copy the files from the cpython repo + 2c. hg add lib-python/2.7/ or lib-python/3/ + 2d. hg remove --after + 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'` + 2f. 
fix copies / renames manually by running `hg copy --after ` for each copied file 3. update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit -5. update to default/py3k +5. update to default / py3k 6. create a integration branch for the new stdlib (just hg branch stdlib-$version) -7. merge vendor/stdlib +7. merge vendor/stdlib or vendor/stdlib-3-* 8. commit 10. fix issues 11. commit --close-branch -12. merge to default +12. merge to default / py3k diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -204,15 +204,6 @@ BoolOption("withstrbuf", "use strings optimized for addition (ver 2)", default=False), - BoolOption("withprebuiltchar", - "use prebuilt single-character string objects", - default=False), - - BoolOption("sharesmallstr", - "always reuse the prebuilt string objects " - "(the empty string and potentially single-char strings)", - default=False), - BoolOption("withspecialisedtuple", "use specialised tuples", default=False), @@ -222,39 +213,14 @@ default=False, requires=[("objspace.honor__builtins__", False)]), - BoolOption("withmapdict", - "make instances really small but slow without the JIT", - default=False, - requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withtypeversion", True), - ]), - - BoolOption("withrangelist", - "enable special range list implementation that does not " - "actually create the full list until the resulting " - "list is mutated", - default=False), BoolOption("withliststrategies", "enable optimized ways to store lists of primitives ", default=True), - BoolOption("withtypeversion", - "version type objects when changing them", - cmdline=None, - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), - - BoolOption("withmethodcache", - "try to cache method lookups", - default=False, - requires=[("objspace.std.withtypeversion", True), - 
("translation.rweakref", True)]), BoolOption("withmethodcachecounter", "try to cache methods and provide a counter in __pypy__. " "for testing purposes only.", - default=False, - requires=[("objspace.std.withmethodcache", True)]), + default=False), IntOption("methodcachesizeexp", " 2 ** methodcachesizeexp is the size of the of the method cache ", default=11), @@ -265,22 +231,10 @@ BoolOption("optimized_list_getitem", "special case the 'list[integer]' expressions", default=False), - BoolOption("getattributeshortcut", - "track types that override __getattribute__", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), BoolOption("newshortcut", "cache and shortcut calling __new__ from builtin types", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), + default=False), - BoolOption("withidentitydict", - "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), ]), ]) @@ -296,15 +250,10 @@ """ # all the good optimizations for PyPy should be listed here if level in ['2', '3', 'jit']: - config.objspace.std.suggest(withrangelist=True) - config.objspace.std.suggest(withmethodcache=True) - config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(intshortcut=True) config.objspace.std.suggest(optimized_list_getitem=True) - config.objspace.std.suggest(getattributeshortcut=True) #config.objspace.std.suggest(newshortcut=True) config.objspace.std.suggest(withspecialisedtuple=True) - config.objspace.std.suggest(withidentitydict=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) @@ -317,16 +266,13 @@ # memory-saving optimizations if level == 'mem': config.objspace.std.suggest(withprebuiltint=True) - config.objspace.std.suggest(withrangelist=True) - 
config.objspace.std.suggest(withprebuiltchar=True) - config.objspace.std.suggest(withmapdict=True) + config.objspace.std.suggest(withliststrategies=True) if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) - config.objspace.std.suggest(withmapdict=True) def enable_allworkingmodules(config): diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -11,12 +11,6 @@ assert conf.objspace.usemodules.gc - conf.objspace.std.withmapdict = True - assert conf.objspace.std.withtypeversion - conf = get_pypy_config() - conf.objspace.std.withtypeversion = False - py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") - def test_conflicting_gcrootfinder(): conf = get_pypy_config() conf.translation.gc = "boehm" @@ -47,18 +41,10 @@ def test_set_pypy_opt_level(): conf = get_pypy_config() set_pypy_opt_level(conf, '2') - assert conf.objspace.std.getattributeshortcut + assert conf.objspace.std.intshortcut conf = get_pypy_config() set_pypy_opt_level(conf, '0') - assert not conf.objspace.std.getattributeshortcut - -def test_rweakref_required(): - conf = get_pypy_config() - conf.translation.rweakref = False - set_pypy_opt_level(conf, '3') - - assert not conf.objspace.std.withtypeversion - assert not conf.objspace.std.withmethodcache + assert not conf.objspace.std.intshortcut def test_check_documentation(): def check_file_exists(fn): diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -108,9 +108,9 @@ On Fedora:: - yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ - lib-sqlite3-devel ncurses-devel expat-devel openssl-devel - (XXX plus the Febora version of libgdbm-dev and tk-dev) + dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ + lib-sqlite3-devel 
ncurses-devel expat-devel openssl-devel tk-devel \ + gdbm-devel For the optional lzma module on PyPy3 you will also need ``xz-devel``. diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.getattributeshortcut.txt +++ /dev/null @@ -1,1 +0,0 @@ -Performance only: track types that override __getattribute__. diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt --- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt +++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt @@ -1,1 +1,1 @@ -Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`. +Set the cache size (number of entries) for the method cache. diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withidentitydict.txt +++ /dev/null @@ -1,21 +0,0 @@ -============================= -objspace.std.withidentitydict -============================= - -* **name:** withidentitydict - -* **description:** enable a dictionary strategy for "by identity" comparisons - -* **command-line:** --objspace-std-withidentitydict - -* **command-line for negation:** --no-objspace-std-withidentitydict - -* **option type:** boolean option - -* **default:** True - - -Enable a dictionary strategy specialized for instances of classes which -compares "by identity", which is the default unless you override ``__hash__``, -``__eq__`` or ``__cmp__``. This strategy will be used only with new-style -classes. diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmapdict.txt +++ /dev/null @@ -1,5 +0,0 @@ -Enable the new version of "sharing dictionaries". 
- -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmethodcache.txt +++ /dev/null @@ -1,2 +0,0 @@ -Enable method caching. See the section "Method Caching" in `Standard -Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__. diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt --- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt +++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt @@ -1,1 +1,1 @@ -Testing/debug option for :config:`objspace.std.withmethodcache`. +Testing/debug option for the method cache. diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt deleted file mode 100644 diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withrangelist.txt +++ /dev/null @@ -1,11 +0,0 @@ -Enable "range list" objects. They are an additional implementation of the Python -``list`` type, indistinguishable for the normal user. Whenever the ``range`` -builtin is called, an range list is returned. As long as this list is not -mutated (and for example only iterated over), it uses only enough memory to -store the start, stop and step of the range. This makes using ``range`` as -efficient as ``xrange``, as long as the result is only used in a ``for``-loop. - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. 
_`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists - diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withtypeversion.txt +++ /dev/null @@ -1,6 +0,0 @@ -This (mostly internal) option enables "type versions": Every type object gets an -(only internally visible) version that is updated when the type's dict is -changed. This is e.g. used for invalidating caches. It does not make sense to -enable this option alone. - -.. internal diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -62,29 +62,37 @@ Dictionary Optimizations ~~~~~~~~~~~~~~~~~~~~~~~~ -Multi-Dicts -+++++++++++ +Dict Strategies +++++++++++++++++ -Multi-dicts are a special implementation of dictionaries. It became clear that -it is very useful to *change* the internal representation of an object during -its lifetime. Multi-dicts are a general way to do that for dictionaries: they -provide generic support for the switching of internal representations for -dicts. +Dict strategies are an implementation approach for dictionaries (and lists) +that make it possible to use a specialized representation of the dictionary's +data, while still being able to switch back to a general representation should +that become necessary later. -If you just enable multi-dicts, special representations for empty dictionaries, -for string-keyed dictionaries. In addition there are more specialized dictionary -implementations for various purposes (see below). +Dict strategies are always enabled, by default there are special strategies for +dicts with just string keys, just unicode keys and just integer keys. If one of +those specialized strategies is used, then dict lookup can use much faster +hashing and comparison for the dict keys. 
There is of course also a strategy +for general keys. -This is now the default implementation of dictionaries in the Python interpreter. +Identity Dicts ++++++++++++++++ -Sharing Dicts +We also have a strategy specialized for keys that are instances of classes +which compares "by identity", which is the default unless you override +``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with +new-style classes. + + +Map Dicts +++++++++++++ -Sharing dictionaries are a special representation used together with multidicts. -This dict representation is used only for instance dictionaries and tries to -make instance dictionaries use less memory (in fact, in the ideal case the -memory behaviour should be mostly like that of using __slots__). +Map dictionaries are a special representation used together with dict strategies. +This dict strategy is used only for instance dictionaries and tries to +make instance dictionaries use less memory (in fact, usually memory behaviour +should be mostly like that of using ``__slots__``). The idea is the following: Most instances of the same class have very similar attributes, and are even adding these keys to the dictionary in the same order @@ -95,8 +103,6 @@ dicts: the representation of the instance dict contains only a list of values. -A more advanced version of sharing dicts, called *map dicts,* is available -with the :config:`objspace.std.withmapdict` option. List Optimizations @@ -114,8 +120,8 @@ created. This gives the memory and speed behaviour of ``xrange`` and the generality of use of ``range``, and makes ``xrange`` essentially useless. -You can enable this feature with the :config:`objspace.std.withrangelist` -option. +This feature is enabled by default as part of the +:config:`objspace.std.withliststrategies` option. User Class Optimizations @@ -133,8 +139,7 @@ base classes is changed). 
On subsequent lookups the cached version can be used, as long as the instance did not shadow any of its classes attributes. -You can enable this feature with the :config:`objspace.std.withmethodcache` -option. +This feature is enabled by default. Interpreter Optimizations diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -10,3 +10,9 @@ .. branch: gcheader-decl Reduce the size of generated C sources. + + +.. branch: remove-objspace-options + +Remove a number of options from the build process that were never tested and +never set. Fix a performance bug in the method cache. diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -214,6 +214,7 @@ self._trace(frame, 'exception', None, operationerr) #operationerr.print_detailed_traceback(self.space) + @jit.dont_look_inside @specialize.arg(1) def sys_exc_info(self, for_hidden=False): """Implements sys.exc_info(). @@ -225,15 +226,7 @@ # NOTE: the result is not the wrapped sys.exc_info() !!! 
""" - frame = self.gettopframe() - while frame: - if frame.last_exception is not None: - if ((for_hidden or not frame.hide()) or - frame.last_exception is - get_cleared_operation_error(self.space)): - return frame.last_exception - frame = frame.f_backref() - return None + return self.gettopframe()._exc_info_unroll(self.space, for_hidden) def set_sys_exc_info(self, operror): frame = self.gettopframe_nohidden() diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -114,6 +114,7 @@ e.write_unraisable(self.space, "new_code_hook()") def _initialize(self): + from pypy.objspace.std.mapdict import init_mapdict_cache if self.co_cellvars: argcount = self.co_argcount assert argcount >= 0 # annotator hint @@ -149,9 +150,7 @@ self._compute_flatcall() - if self.space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import init_mapdict_cache - init_mapdict_cache(self) + init_mapdict_cache(self) def _init_ready(self): "This is a hook for the vmprof module, which overrides this method." 
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized, check_nonneg from rpython.rlib.jit import hint -from rpython.rlib.objectmodel import we_are_translated, instantiate +from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint from rpython.tool.pairtype import extendabletype @@ -12,7 +12,8 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.astcompiler import consts from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import ( + OperationError, get_cleared_operation_error, oefmt) from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.nestedscope import Cell from pypy.tool import stdlib_opcode @@ -870,6 +871,22 @@ return space.wrap(self.builtin is not space.builtin) return space.w_False + @jit.unroll_safe + @specialize.arg(2) + def _exc_info_unroll(self, space, for_hidden=False): + """Return the most recent OperationError being handled in the + call stack + """ + frame = self + while frame: + last = frame.last_exception + if last is not None: + if last is get_cleared_operation_error(self.space): + break + if for_hidden or not frame.hide(): + return last + frame = frame.f_backref() + return None # ____________________________________________________________ diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -739,25 +739,16 @@ unroller = SContinueLoop(startofloop) return self.unrollstack_and_jump(unroller) - @jit.unroll_safe def RAISE_VARARGS(self, nbargs, next_instr): space = self.space if nbargs == 0: - frame = self - while frame: - if frame.last_exception is not None: - operror = 
frame.last_exception - break - frame = frame.f_backref() - else: - raise OperationError(space.w_TypeError, - space.wrap("raise: no active exception to re-raise")) - if operror.w_type is space.w_None: - raise OperationError(space.w_TypeError, - space.wrap("raise: the exception to re-raise was cleared")) + last_operr = self._exc_info_unroll(space) + if last_operr is None: + raise oefmt(space.w_TypeError, + "No active exception to reraise") # re-raise, no new traceback obj will be attached - self.last_exception = operror - raise RaiseWithExplicitTraceback(operror) + self.last_exception = last_operr + raise RaiseWithExplicitTraceback(last_operr) w_value = w_traceback = space.w_None if nbargs >= 3: @@ -951,8 +942,7 @@ def LOAD_ATTR(self, nameindex, next_instr): "obj.attributename" w_obj = self.popvalue() - if (self.space.config.objspace.std.withmapdict - and not jit.we_are_jitted()): + if not jit.we_are_jitted(): from pypy.objspace.std.mapdict import LOAD_ATTR_caching w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex) else: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -98,175 +98,51 @@ # reason is that it is missing a place to store the __dict__, the slots, # the weakref lifeline, and it typically has no interp-level __del__. # So we create a few interp-level subclasses of W_XxxObject, which add -# some combination of features. -# -# We don't build 2**4 == 16 subclasses for all combinations of requested -# features, but limit ourselves to 6, chosen a bit arbitrarily based on -# typical usage (case 1 is the most common kind of app-level subclasses; -# case 2 is the memory-saving kind defined with __slots__). -# -# +----------------------------------------------------------------+ -# | NOTE: if withmapdict is enabled, the following doesn't apply! | -# | Map dicts can flexibly allow any slots/__dict__/__weakref__ to | -# | show up only when needed. 
In particular there is no way with | -# | mapdict to prevent some objects from being weakrefable. | -# +----------------------------------------------------------------+ -# -# dict slots del weakrefable -# -# 1. Y N N Y UserDictWeakref -# 2. N Y N N UserSlots -# 3. Y Y N Y UserDictWeakrefSlots -# 4. N Y N Y UserSlotsWeakref -# 5. Y Y Y Y UserDictWeakrefSlotsDel -# 6. N Y Y Y UserSlotsWeakrefDel -# -# Note that if the app-level explicitly requests no dict, we should not -# provide one, otherwise storing random attributes on the app-level -# instance would unexpectedly work. We don't care too much, though, if -# an object is weakrefable when it shouldn't really be. It's important -# that it has a __del__ only if absolutely needed, as this kills the -# performance of the GCs. -# -# Interp-level inheritance is like this: -# -# W_XxxObject base -# / \ -# 1 2 -# / \ -# 3 4 -# / \ -# 5 6 +# some combination of features. This is done using mapdict. -def get_unique_interplevel_subclass(config, cls, hasdict, wants_slots, - needsdel=False, weakrefable=False): +# we need two subclasses of the app-level type, one to add mapdict, and then one +# to add del to not slow down the GC. 
+ +def get_unique_interplevel_subclass(config, cls, needsdel=False): "NOT_RPYTHON: initialization-time only" if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False): needsdel = False assert cls.typedef.acceptable_as_base_class - key = config, cls, hasdict, wants_slots, needsdel, weakrefable + key = config, cls, needsdel try: return _subclass_cache[key] except KeyError: - subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel, - weakrefable) + # XXX can save a class if cls already has a __del__ + if needsdel: + cls = get_unique_interplevel_subclass(config, cls, False) + subcls = _getusercls(config, cls, needsdel) assert key not in _subclass_cache _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable): +def _getusercls(config, cls, wants_del, reallywantdict=False): + from rpython.rlib import objectmodel + from pypy.objspace.std.mapdict import (BaseUserClassMapdict, + MapdictDictSupport, MapdictWeakrefSupport, + _make_storage_mixin_size_n) typedef = cls.typedef - if wants_dict and typedef.hasdict: - wants_dict = False - if config.objspace.std.withmapdict and not typedef.hasdict: - # mapdict only works if the type does not already have a dict - if wants_del: - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - return _usersubclswithfeature(config, cls, "user", "dict", "weakref", "slots") - # Forest of if's - see the comment above. 
+ name = cls.__name__ + "User" + + mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()] + if reallywantdict or not typedef.hasdict: + # the type has no dict, mapdict to provide the dict + mixins_needed.append(MapdictDictSupport) + name += "Dict" + if not typedef.weakrefable: + # the type does not support weakrefs yet, mapdict to provide weakref + # support + mixins_needed.append(MapdictWeakrefSupport) + name += "Weakrefable" if wants_del: - if wants_dict: - # case 5. Parent class is 3. - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - else: - # case 6. Parent class is 4. - parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - elif wants_dict: - if wants_slots: - # case 3. Parent class is 1. - parentcls = get_unique_interplevel_subclass(config, cls, True, False, - False, True) - return _usersubclswithfeature(config, parentcls, "slots") - else: - # case 1 (we need to add weakrefable unless it's already in 'cls') - if not typedef.weakrefable: - return _usersubclswithfeature(config, cls, "user", "dict", "weakref") - else: - return _usersubclswithfeature(config, cls, "user", "dict") - else: - if weakrefable and not typedef.weakrefable: - # case 4. Parent class is 2. 
- parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, False) - return _usersubclswithfeature(config, parentcls, "weakref") - else: - # case 2 (if the base is already weakrefable, case 2 == case 4) - return _usersubclswithfeature(config, cls, "user", "slots") - -def _usersubclswithfeature(config, parentcls, *features): - key = config, parentcls, features - try: - return _usersubclswithfeature_cache[key] - except KeyError: - subcls = _builduserclswithfeature(config, parentcls, *features) - _usersubclswithfeature_cache[key] = subcls - return subcls -_usersubclswithfeature_cache = {} -_allusersubcls_cache = {} - -def _builduserclswithfeature(config, supercls, *features): - "NOT_RPYTHON: initialization-time only" - name = supercls.__name__ - name += ''.join([name.capitalize() for name in features]) - body = {} - #print '..........', name, '(', supercls.__name__, ')' - - def add(Proto): - for key, value in Proto.__dict__.items(): - if (not key.startswith('__') and not key.startswith('_mixin_') - or key == '__del__'): - if hasattr(value, "func_name"): - value = func_with_new_name(value, value.func_name) - body[key] = value - - if (config.objspace.std.withmapdict and "dict" in features): - from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin - add(BaseMapdictObject) - add(ObjectMixin) - body["user_overridden_class"] = True - features = () - - if "user" in features: # generic feature needed by all subcls - - class Proto(object): - user_overridden_class = True - - def getclass(self, space): - return promote(self.w__class__) - - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - - def user_setup(self, space, w_subtype): - self.space = space - self.w__class__ = w_subtype - self.user_setup_slots(w_subtype.layout.nslots) - - def user_setup_slots(self, nslots): - assert nslots == 0 - add(Proto) - - if "weakref" in features: - class Proto(object): - _lifeline_ = None - def 
getweakref(self): - return self._lifeline_ - def setweakref(self, space, weakreflifeline): - self._lifeline_ = weakreflifeline - def delweakref(self): - self._lifeline_ = None - add(Proto) - - if "del" in features: - parent_destructor = getattr(supercls, '__del__', None) + name += "Del" + parent_destructor = getattr(cls, '__del__', None) def call_parent_del(self): assert isinstance(self, subcls) parent_destructor(self) @@ -281,57 +157,16 @@ if parent_destructor is not None: self.enqueue_for_destruction(self.space, call_parent_del, 'internal destructor of ') - add(Proto) + mixins_needed.append(Proto) - if "slots" in features: - class Proto(object): - slots_w = [] - def user_setup_slots(self, nslots): - if nslots > 0: - self.slots_w = [None] * nslots - def setslotvalue(self, index, w_value): - self.slots_w[index] = w_value - def delslotvalue(self, index): - if self.slots_w[index] is None: - return False - self.slots_w[index] = None - return True - def getslotvalue(self, index): - return self.slots_w[index] - add(Proto) - - if "dict" in features: - base_user_setup = supercls.user_setup.im_func - if "user_setup" in body: - base_user_setup = body["user_setup"] - class Proto(object): - def getdict(self, space): - return self.w__dict__ - - def setdict(self, space, w_dict): - self.w__dict__ = check_new_dictionary(space, w_dict) - - def user_setup(self, space, w_subtype): - self.w__dict__ = space.newdict( - instance=True) - base_user_setup(self, space, w_subtype) - - add(Proto) - - subcls = type(name, (supercls,), body) - _allusersubcls_cache[subcls] = True + class subcls(cls): + user_overridden_class = True + for base in mixins_needed: + objectmodel.import_from_mixin(base) + del subcls.base + subcls.__name__ = name return subcls -# a couple of helpers for the Proto classes above, factored out to reduce -# the translated code size -def check_new_dictionary(space, w_dict): - if not space.isinstance_w(w_dict, space.w_dict): - raise OperationError(space.w_TypeError, - 
space.wrap("setting dictionary to a non-dict")) - from pypy.objspace.std import dictmultiobject - assert isinstance(w_dict, dictmultiobject.W_DictMultiObject) - return w_dict -check_new_dictionary._dont_inline_ = True # ____________________________________________________________ diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -87,7 +87,7 @@ howmany = get_len_of_range(space, start, stop, step) - if space.config.objspace.std.withrangelist: + if space.config.objspace.std.withliststrategies: return range_withspecialized_implementation(space, start, step, howmany) res_w = [None] * howmany @@ -99,7 +99,7 @@ def range_withspecialized_implementation(space, start, step, length): - assert space.config.objspace.std.withrangelist + assert space.config.objspace.std.withliststrategies from pypy.objspace.std.listobject import make_range_list return make_range_list(space, start, step, length) diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -185,12 +185,19 @@ class Cache: def __init__(self, space): - from pypy.interpreter.typedef import _usersubclswithfeature - # evil - self.cls_without_del = _usersubclswithfeature( - space.config, W_InstanceObject, "dict", "weakref") - self.cls_with_del = _usersubclswithfeature( - space.config, self.cls_without_del, "del") + from pypy.interpreter.typedef import _getusercls + + if hasattr(space, 'is_fake_objspace'): + # hack: with the fake objspace, we don't want to see typedef's + # _getusercls() at all + self.cls_without_del = W_InstanceObject + self.cls_with_del = W_InstanceObject + return + + self.cls_without_del = _getusercls( + space.config, W_InstanceObject, False, reallywantdict=True) + self.cls_with_del = _getusercls( + space.config, W_InstanceObject, 
True, reallywantdict=True) def class_descr_call(space, w_self, __args__): diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -748,10 +748,6 @@ raises(TypeError, delattr, A(), 42) -class AppTestGetattrWithGetAttributeShortcut(AppTestGetattr): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - class TestInternal: def test_execfile(self, space): fn = str(udir.join('test_execfile')) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1118,8 +1118,7 @@ assert getattr(c, u"x") == 1 -class AppTestOldStyleMapDict(AppTestOldstyle): - spaceconfig = {"objspace.std.withmapdict": True} +class AppTestOldStyleMapDict: def setup_class(cls): if cls.runappdirect: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -110,9 +110,8 @@ 'interp_magic.method_cache_counter') self.extra_interpdef('reset_method_cache_counter', 'interp_magic.reset_method_cache_counter') - if self.space.config.objspace.std.withmapdict: - self.extra_interpdef('mapdict_cache_counter', - 'interp_magic.mapdict_cache_counter') + self.extra_interpdef('mapdict_cache_counter', + 'interp_magic.mapdict_cache_counter') PYC_MAGIC = get_pyc_magic(self.space) self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC) try: diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -37,17 +37,15 @@ cache = space.fromcache(MethodCache) cache.misses = {} cache.hits = {} - if space.config.objspace.std.withmapdict: - cache = space.fromcache(MapAttrCache) - cache.misses = 
{} - cache.hits = {} + cache = space.fromcache(MapAttrCache) + cache.misses = {} + cache.hits = {} @unwrap_spec(name=str) def mapdict_cache_counter(space, name): """Return a tuple (index_cache_hits, index_cache_misses) for lookups in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter - assert space.config.objspace.std.withmapdict cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -1,8 +1,7 @@ import py class AppTest(object): - spaceconfig = {"objspace.usemodules.select": False, - "objspace.std.withrangelist": True} + spaceconfig = {"objspace.usemodules.select": False} def setup_class(cls): if cls.runappdirect: diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -54,6 +54,9 @@ @cpython_api([PyObject], lltype.Void) def PyObject_dealloc(space, obj): + # This frees an object after its refcount dropped to zero, so we + # assert that it is really zero here. 
+ assert obj.c_ob_refcnt == 0 pto = obj.c_ob_type obj_voidp = rffi.cast(rffi.VOIDP, obj) generic_cpy_call(space, pto.c_tp_free, obj_voidp) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -140,7 +140,6 @@ 'itertools', 'time', 'binascii', 'micronumpy', ]) - spaceconfig['std.withmethodcache'] = True enable_leak_checking = True diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -85,11 +85,10 @@ @cpython_api([PyObject], lltype.Void, header=None) def unicode_dealloc(space, py_obj): py_unicode = rffi.cast(PyUnicodeObject, py_obj) + Py_DecRef(space, py_unicode.c_defenc) if py_unicode.c_str: lltype.free(py_unicode.c_str, flavor="raw") from pypy.module.cpyext.object import PyObject_dealloc - if py_unicode.c_defenc: - PyObject_dealloc(space, py_unicode.c_defenc) PyObject_dealloc(space, py_obj) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -6,15 +6,14 @@ @unwrap_spec(generation=int) def collect(space, generation=0): "Run a full collection. The optional argument is ignored." - # First clear the method cache. See test_gc for an example of why. - if space.config.objspace.std.withmethodcache: - from pypy.objspace.std.typeobject import MethodCache - cache = space.fromcache(MethodCache) - cache.clear() - if space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import MapAttrCache - cache = space.fromcache(MapAttrCache) - cache.clear() + # First clear the method and the map cache. + # See test_gc for an example of why. 
+ from pypy.objspace.std.typeobject import MethodCache + from pypy.objspace.std.mapdict import MapAttrCache + cache = space.fromcache(MethodCache) + cache.clear() + cache = space.fromcache(MapAttrCache) + cache.clear() rgc.collect() return space.wrap(0) diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py --- a/pypy/module/gc/test/test_gc.py +++ b/pypy/module/gc/test/test_gc.py @@ -106,7 +106,6 @@ class AppTestGcMethodCache(object): - spaceconfig = {"objspace.std.withmethodcache": True} def test_clear_method_cache(self): import gc, weakref @@ -127,10 +126,6 @@ assert r() is None -class AppTestGcMapDictIndexCache(AppTestGcMethodCache): - spaceconfig = {"objspace.std.withmethodcache": True, - "objspace.std.withmapdict": True} - def test_clear_index_cache(self): import gc, weakref rlist = [] diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -25,9 +25,9 @@ i61 = int_add(i58, 1) setfield_gc(p18, i61, descr=) guard_not_invalidated(descr=...) - p65 = getfield_gc_r(p14, descr=) + p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) - p66 = getfield_gc_r(p14, descr=) + p66 = getfield_gc_r(p14, descr=) guard_nonnull_class(p66, ..., descr=...) 
p67 = force_token() setfield_gc(p0, p67, descr=) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -121,6 +121,8 @@ 'bytearray', 'buffer', 'set', 'frozenset'] class FakeObjSpace(ObjSpace): + is_fake_objspace = True + def __init__(self, config=None): self._seen_extras = [] ObjSpace.__init__(self, config=config) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -841,34 +841,12 @@ return [s for s in value] W_BytesObject.EMPTY = W_BytesObject('') -W_BytesObject.PREBUILT = [W_BytesObject(chr(i)) for i in range(256)] -del i def wrapstr(space, s): - if space.config.objspace.std.sharesmallstr: - if space.config.objspace.std.withprebuiltchar: - # share characters and empty string - if len(s) <= 1: - if len(s) == 0: - return W_BytesObject.EMPTY - else: - s = s[0] # annotator hint: a single char - return wrapchar(space, s) - else: - # only share the empty string - if len(s) == 0: - return W_BytesObject.EMPTY return W_BytesObject(s) -def wrapchar(space, c): - if space.config.objspace.std.withprebuiltchar and not we_are_jitted(): - return W_BytesObject.PREBUILT[ord(c)] - else: - return W_BytesObject(c) - - W_BytesObject.typedef = TypeDef( "str", basestring_typedef, __new__ = interp2app(W_BytesObject.descr_new), diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -23,6 +23,7 @@ def LOOKUP_METHOD(f, nameindex, *ignored): + from pypy.objspace.std.typeobject import MutableCell # stack before after # -------------- --fast-method----fallback-case------------ # @@ -33,7 +34,7 @@ space = f.space w_obj = f.popvalue() - if space.config.objspace.std.withmapdict and not jit.we_are_jitted(): + if not jit.we_are_jitted(): # mapdict has an extra-fast version of this function 
if LOOKUP_METHOD_mapdict(f, nameindex, w_obj): return @@ -44,7 +45,18 @@ w_type = space.type(w_obj) if w_type.has_object_getattribute(): name = space.str_w(w_name) - w_descr = w_type.lookup(name) + # bit of a mess to use these internal functions, but it allows the + # mapdict caching below to work without an additional lookup + version_tag = w_type.version_tag() + if version_tag is None: + _, w_descr = w_type._lookup_where(name) + w_descr_cell = None + else: + _, w_descr_cell = w_type._pure_lookup_where_with_method_cache( + name, version_tag) + w_descr = w_descr_cell + if isinstance(w_descr, MutableCell): + w_descr = w_descr.unwrap_cell(space) if w_descr is None: # this handles directly the common case # module.function(args..) @@ -59,11 +71,11 @@ # nothing in the instance f.pushvalue(w_descr) f.pushvalue(w_obj) - if (space.config.objspace.std.withmapdict and - not jit.we_are_jitted()): + if not jit.we_are_jitted(): # let mapdict cache stuff LOOKUP_METHOD_mapdict_fill_cache_method( - space, f.getcode(), name, nameindex, w_obj, w_type) + space, f.getcode(), name, nameindex, w_obj, w_type, + w_descr_cell) return if w_value is None: w_value = space.getattr(w_obj, w_name) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -66,10 +66,10 @@ w_obj = space.allocate_instance(W_ModuleDictObject, space.w_dict) W_ModuleDictObject.__init__(w_obj, space, strategy, storage) return w_obj - elif space.config.objspace.std.withmapdict and instance: + elif instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) - elif instance or strdict or module: + elif strdict or module: assert w_type is None strategy = space.fromcache(BytesDictStrategy) elif kwargs: @@ -592,7 +592,6 @@ return self.erase(None) def switch_to_correct_strategy(self, w_dict, w_key): - withidentitydict = 
self.space.config.objspace.std.withidentitydict if type(w_key) is self.space.StringObjectCls: self.switch_to_bytes_strategy(w_dict) return @@ -602,7 +601,7 @@ w_type = self.space.type(w_key) if self.space.is_w(w_type, self.space.w_int): self.switch_to_int_strategy(w_dict) - elif withidentitydict and w_type.compares_by_identity(): + elif w_type.compares_by_identity(): self.switch_to_identity_strategy(w_dict) else: self.switch_to_object_strategy(w_dict) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -67,12 +67,7 @@ @jit.elidable def find_map_attr(self, name, index): - if (self.space.config.objspace.std.withmethodcache): - return self._find_map_attr_cache(name, index) - return self._find_map_attr(name, index) - - @jit.dont_look_inside - def _find_map_attr_cache(self, name, index): + # attr cache space = self.space cache = space.fromcache(MapAttrCache) SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp @@ -429,7 +424,6 @@ class MapAttrCache(object): def __init__(self, space): - assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.attrs = [None] * SIZE self.names = [None] * SIZE @@ -456,12 +450,19 @@ INVALID = 2 SLOTS_STARTING_FROM = 3 +# a little bit of a mess of mixin classes that implement various pieces of +# objspace user object functionality in terms of mapdict -class BaseMapdictObject: - _mixin_ = True +class BaseUserClassMapdict: + # everything that's needed to use mapdict for a user subclass at all. + # This immediately makes slots possible. 
- def _init_empty(self, map): - raise NotImplementedError("abstract base class") + # assumes presence of _init_empty, _mapdict_read_storage, + # _mapdict_write_storage, _mapdict_storage_length, + # _set_mapdict_storage_and_map + + # _____________________________________________ + # methods needed for mapdict def _become(self, new_obj): self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) @@ -470,49 +471,11 @@ return jit.promote(self.map) def _set_mapdict_map(self, map): self.map = map + # _____________________________________________ # objspace interface - def getdictvalue(self, space, attrname): - return self._get_mapdict_map().read(self, attrname, DICT) - - def setdictvalue(self, space, attrname, w_value): - return self._get_mapdict_map().write(self, attrname, DICT, w_value) - - def deldictvalue(self, space, attrname): - new_obj = self._get_mapdict_map().delete(self, attrname, DICT) - if new_obj is None: - return False - self._become(new_obj) - return True - - def getdict(self, space): - w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) - if w_dict is not None: - assert isinstance(w_dict, W_DictMultiObject) - return w_dict - - strategy = space.fromcache(MapDictStrategy) - storage = strategy.erase(self) - w_dict = W_DictObject(space, strategy, storage) - flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) - assert flag - return w_dict - - def setdict(self, space, w_dict): - from pypy.interpreter.typedef import check_new_dictionary - w_dict = check_new_dictionary(space, w_dict) - w_olddict = self.getdict(space) - assert isinstance(w_dict, W_DictMultiObject) - # The old dict has got 'self' as dstorage, but we are about to - # change self's ("dict", SPECIAL) attribute to point to the - # new dict. If the old dict was using the MapDictStrategy, we - # have to force it now: otherwise it would remain an empty - # shell that continues to delegate to 'self'. 
- if type(w_olddict.get_strategy()) is MapDictStrategy: - w_olddict.get_strategy().switch_to_object_strategy(w_olddict) - flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) - assert flag + # class access def getclass(self, space): return self._get_mapdict_map().terminator.w_cls @@ -525,9 +488,13 @@ from pypy.module.__builtin__.interp_classobj import W_InstanceObject self.space = space assert (not self.typedef.hasdict or + isinstance(w_subtype.terminator, NoDictTerminator) or self.typedef is W_InstanceObject.typedef) self._init_empty(w_subtype.terminator) + + # methods needed for slots + def getslotvalue(self, slotindex): index = SLOTS_STARTING_FROM + slotindex return self._get_mapdict_map().read(self, "slot", index) @@ -544,7 +511,9 @@ self._become(new_obj) return True - # used by _weakref implemenation + +class MapdictWeakrefSupport(object): + # stuff used by the _weakref implementation def getweakref(self): from pypy.module._weakref.interp__weakref import WeakrefLifeline @@ -565,8 +534,71 @@ self._get_mapdict_map().write(self, "weakref", SPECIAL, None) delweakref._cannot_really_call_random_things_ = True -class ObjectMixin(object): - _mixin_ = True + +class MapdictDictSupport(object): + + # objspace interface for dictionary operations + + def getdictvalue(self, space, attrname): + return self._get_mapdict_map().read(self, attrname, DICT) + + def setdictvalue(self, space, attrname, w_value): + return self._get_mapdict_map().write(self, attrname, DICT, w_value) + + def deldictvalue(self, space, attrname): + new_obj = self._get_mapdict_map().delete(self, attrname, DICT) + if new_obj is None: + return False + self._become(new_obj) + return True + + def getdict(self, space): + return _obj_getdict(self, space) + + def setdict(self, space, w_dict): + _obj_setdict(self, space, w_dict) + +# a couple of helpers for the classes above, factored out to reduce +# the translated code size + + at objectmodel.dont_inline +def _obj_getdict(self, space): + 
terminator = self._get_mapdict_map().terminator + assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator) + w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) + if w_dict is not None: + assert isinstance(w_dict, W_DictMultiObject) + return w_dict + + strategy = space.fromcache(MapDictStrategy) + storage = strategy.erase(self) + w_dict = W_DictObject(space, strategy, storage) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) + assert flag + return w_dict + + at objectmodel.dont_inline +def _obj_setdict(self, space, w_dict): + from pypy.interpreter.error import OperationError + terminator = self._get_mapdict_map().terminator + assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator) + if not space.isinstance_w(w_dict, space.w_dict): + raise OperationError(space.w_TypeError, + space.wrap("setting dictionary to a non-dict")) + assert isinstance(w_dict, W_DictMultiObject) + w_olddict = self.getdict(space) + assert isinstance(w_olddict, W_DictMultiObject) + # The old dict has got 'self' as dstorage, but we are about to + # change self's ("dict", SPECIAL) attribute to point to the + # new dict. If the old dict was using the MapDictStrategy, we + # have to force it now: otherwise it would remain an empty + # shell that continues to delegate to 'self'. 
+ if type(w_olddict.get_strategy()) is MapDictStrategy: + w_olddict.get_strategy().switch_to_object_strategy(w_olddict) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) + assert flag + +class MapdictStorageMixin(object): def _init_empty(self, map): from rpython.rlib.debug import make_sure_not_resized self.map = map @@ -585,51 +617,32 @@ self.storage = storage self.map = map -class Object(ObjectMixin, BaseMapdictObject, W_Root): - pass # mainly for tests +class ObjectWithoutDict(W_Root): + # mainly for tests + objectmodel.import_from_mixin(MapdictStorageMixin) -def get_subclass_of_correct_size(space, cls, w_type): - assert space.config.objspace.std.withmapdict - map = w_type.terminator - classes = memo_get_subclass_of_correct_size(space, cls) - if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS: - return classes[0] - size = map.size_estimate() - debug.check_nonneg(size) - if size < len(classes): - return classes[size] - else: - return classes[len(classes)-1] -get_subclass_of_correct_size._annspecialcase_ = "specialize:arg(1)" + objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictWeakrefSupport) -SUBCLASSES_MIN_FIELDS = 5 # XXX tweak these numbers -SUBCLASSES_MAX_FIELDS = 5 -def memo_get_subclass_of_correct_size(space, supercls): - key = space, supercls - try: - return _subclass_cache[key] - except KeyError: - assert not hasattr(supercls, "__del__") - result = [] - for i in range(SUBCLASSES_MIN_FIELDS, SUBCLASSES_MAX_FIELDS+1): - result.append(_make_subclass_size_n(supercls, i)) - for i in range(SUBCLASSES_MIN_FIELDS): - result.insert(0, result[0]) - if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS: - assert len(set(result)) == 1 - _subclass_cache[key] = result - return result -memo_get_subclass_of_correct_size._annspecialcase_ = "specialize:memo" -_subclass_cache = {} +class Object(W_Root): + # mainly for tests + objectmodel.import_from_mixin(MapdictStorageMixin) -def _make_subclass_size_n(supercls, n): + 
objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictWeakrefSupport) + objectmodel.import_from_mixin(MapdictDictSupport) + + +SUBCLASSES_NUM_FIELDS = 5 + +def _make_storage_mixin_size_n(n=SUBCLASSES_NUM_FIELDS): from rpython.rlib import unroll rangen = unroll.unrolling_iterable(range(n)) nmin1 = n - 1 rangenmin1 = unroll.unrolling_iterable(range(nmin1)) valnmin1 = "_value%s" % nmin1 - class subcls(BaseMapdictObject, supercls): + class subcls(object): def _init_empty(self, map): for i in rangenmin1: setattr(self, "_value%s" % i, None) @@ -697,7 +710,7 @@ erased = erase_list(storage_list) setattr(self, "_value%s" % nmin1, erased) - subcls.__name__ = supercls.__name__ + "Size%s" % n + subcls.__name__ = "Size%s" % n return subcls # ____________________________________________________________ @@ -964,7 +977,7 @@ name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is # a MutableCell, which may change without changing the version_tag - _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( + _, w_descr = w_type._pure_lookup_where_with_method_cache( name, version_tag) # attrname, index = ("", INVALID) @@ -1011,22 +1024,15 @@ return False def LOOKUP_METHOD_mapdict_fill_cache_method(space, pycode, name, nameindex, - w_obj, w_type): + w_obj, w_type, w_method): + if w_method is None or isinstance(w_method, MutableCell): + # don't cache the MutableCell XXX could be fixed + return version_tag = w_type.version_tag() - if version_tag is None: - return + assert version_tag is not None map = w_obj._get_mapdict_map() if map is None or isinstance(map.terminator, DevolvedDictTerminator): return - # We know here that w_obj.getdictvalue(space, name) just returned None, - # so the 'name' is not in the instance. We repeat the lookup to find it - # in the class, this time taking care of the result: it can be either a - # quasi-constant class attribute, or actually a MutableCell --- which we - # must not cache. 
(It should not be None here, but you never know...) - _, w_method = w_type._pure_lookup_where_possibly_with_method_cache( - name, version_tag) - if w_method is None or isinstance(w_method, MutableCell): - return _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) # XXX fix me: if a function contains a loop with both LOAD_ATTR and diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -357,15 +357,8 @@ if cls.typedef.applevel_subclasses_base is not None: cls = cls.typedef.applevel_subclasses_base # - if (self.config.objspace.std.withmapdict and cls is W_ObjectObject - and not w_subtype.needsdel): - from pypy.objspace.std.mapdict import get_subclass_of_correct_size - subcls = get_subclass_of_correct_size(self, cls, w_subtype) - else: - subcls = get_unique_interplevel_subclass( - self.config, cls, w_subtype.hasdict, - w_subtype.layout.nslots != 0, - w_subtype.needsdel, w_subtype.weakrefable) + subcls = get_unique_interplevel_subclass( + self.config, cls, w_subtype.needsdel) instance = instantiate(subcls) assert isinstance(instance, cls) instance.user_setup(self, w_subtype) @@ -518,7 +511,6 @@ return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2]) _DescrOperation_is_true = is_true - _DescrOperation_getattr = getattr def is_true(self, w_obj): # a shortcut for performance @@ -527,8 +519,6 @@ return self._DescrOperation_is_true(w_obj) def getattr(self, w_obj, w_name): - if not self.config.objspace.std.getattributeshortcut: - return self._DescrOperation_getattr(w_obj, w_name) # an optional shortcut for performance w_type = self.type(w_obj) diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -795,13 +795,3 @@ return 42 x = Foo() assert "hello" + x == 42 - -class AppTestPrebuilt(AppTestBytesObject): - spaceconfig 
= {"objspace.std.withprebuiltchar": True} - -class AppTestShare(AppTestBytesObject): - spaceconfig = {"objspace.std.sharesmallstr": True} - -class AppTestPrebuiltShare(AppTestBytesObject): - spaceconfig = {"objspace.std.withprebuiltchar": True, - "objspace.std.sharesmallstr": True} diff --git a/pypy/objspace/std/test/test_callmethod.py b/pypy/objspace/std/test/test_callmethod.py --- a/pypy/objspace/std/test/test_callmethod.py +++ b/pypy/objspace/std/test/test_callmethod.py @@ -97,21 +97,17 @@ else: raise Exception("did not raise?") """ - + def test_kwargs(self): exec """if 1: class C(object): def f(self, a): return a + 2 - + assert C().f(a=3) == 5 """ -class AppTestCallMethodWithGetattributeShortcut(AppTestCallMethod): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - class TestCallMethod: def test_space_call_method(self): space = self.space diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1113,11 +1113,9 @@ class Config: class objspace: class std: - withsmalldicts = False withcelldict = False - withmethodcache = False - withidentitydict = False - withmapdict = False + methodcachesizeexp = 11 + withmethodcachecounter = False FakeSpace.config = Config() diff --git a/pypy/objspace/std/test/test_identitydict.py b/pypy/objspace/std/test/test_identitydict.py --- a/pypy/objspace/std/test/test_identitydict.py +++ b/pypy/objspace/std/test/test_identitydict.py @@ -1,62 +1,7 @@ import py from pypy.interpreter.gateway import interp2app -class AppTestComparesByIdentity: - spaceconfig = {"objspace.std.withidentitydict": True} - - def setup_class(cls): - from pypy.objspace.std import identitydict - if cls.runappdirect: - py.test.skip("interp2app doesn't work on appdirect") - - def compares_by_identity(space, w_cls): - return space.wrap(w_cls.compares_by_identity()) - cls.w_compares_by_identity = 
cls.space.wrap(interp2app(compares_by_identity)) - - def test_compares_by_identity(self): - class Plain(object): - pass - - class CustomEq(object): - def __eq__(self, other): - return True - - class CustomCmp (object): - def __cmp__(self, other): - return 0 - - class CustomHash(object): - def __hash__(self): - return 0 - - class TypeSubclass(type): - pass - - class TypeSubclassCustomCmp(type): - def __cmp__(self, other): - return 0 - - assert self.compares_by_identity(Plain) - assert not self.compares_by_identity(CustomEq) - assert not self.compares_by_identity(CustomCmp) - assert not self.compares_by_identity(CustomHash) - assert self.compares_by_identity(type) - assert self.compares_by_identity(TypeSubclass) - assert not self.compares_by_identity(TypeSubclassCustomCmp) - - def test_modify_class(self): - class X(object): - pass - - assert self.compares_by_identity(X) - X.__eq__ = lambda x: None - assert not self.compares_by_identity(X) - del X.__eq__ - assert self.compares_by_identity(X) - - class AppTestIdentityDict(object): - spaceconfig = {"objspace.std.withidentitydict": True} def setup_class(cls): if cls.runappdirect: diff --git a/pypy/objspace/std/test/test_identityset.py b/pypy/objspace/std/test/test_identityset.py --- a/pypy/objspace/std/test/test_identityset.py +++ b/pypy/objspace/std/test/test_identityset.py @@ -3,9 +3,6 @@ class AppTestIdentitySet(object): - # needed for compares_by_identity - spaceconfig = {"objspace.std.withidentitydict": True} - def setup_class(cls): from pypy.objspace.std import identitydict if cls.runappdirect: diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -432,6 +432,8 @@ class AppTestListObject(object): + spaceconfig = {"objspace.std.withliststrategies": True} # it's the default + def setup_class(cls): import platform import sys @@ -1590,20 +1592,13 @@ assert L3.index(-0.0, i) == 
i -class AppTestListObjectWithRangeList(AppTestListObject): - """Run the list object tests with range lists enabled. Tests should go in - AppTestListObject so they can be run -A against CPython as well. - """ - spaceconfig = {"objspace.std.withrangelist": True} - - class AppTestRangeListForcing: """Tests for range lists that test forcing. Regular tests should go in AppTestListObject so they can be run -A against CPython as well. Separate from AppTestListObjectWithRangeList so we don't silently overwrite tests with the same names. """ - spaceconfig = {"objspace.std.withrangelist": True} + spaceconfig = {"objspace.std.withliststrategies": True} def setup_class(cls): if cls.runappdirect: diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -4,18 +4,16 @@ class Config: class objspace: class std: - withsmalldicts = False withcelldict = False - withmethodcache = False - withidentitydict = False - withmapdict = True + methodcachesizeexp = 11 + withmethodcachecounter = False space = FakeSpace() space.config = Config class Class(object): def __init__(self, hasdict=True): - self.hasdict = True + self.hasdict = hasdict if hasdict: self.terminator = DictTerminator(space, self) else: @@ -24,10 +22,17 @@ def instantiate(self, sp=None): if sp is None: sp = space - result = Object() + if self.hasdict: + result = Object() + else: + result = ObjectWithoutDict() result.user_setup(sp, self) return result +class ObjectWithoutDict(ObjectWithoutDict): + class typedef: + hasdict = False + class Object(Object): class typedef: hasdict = False @@ -431,6 +436,9 @@ assert obj.getslotvalue(b) == 60 assert obj.storage == [50, 60] assert not obj.setdictvalue(space, "a", 70) + assert obj.getdict(space) is None + assert obj.getdictvalue(space, "a") is None + def test_getdict(): cls = Class() @@ -591,15 +599,20 @@ def test_specialized_class(): + from 
pypy.objspace.std.mapdict import _make_storage_mixin_size_n from pypy.objspace.std.objectobject import W_ObjectObject - classes = memo_get_subclass_of_correct_size(space, W_ObjectObject) + classes = [_make_storage_mixin_size_n(i) for i in range(2, 10)] w1 = W_Root() w2 = W_Root() w3 = W_Root() w4 = W_Root() w5 = W_Root() w6 = W_Root() - for objectcls in classes: + for mixin in classes: + class objectcls(W_ObjectObject): + objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictDictSupport) + objectmodel.import_from_mixin(mixin) cls = Class() obj = objectcls() obj.user_setup(space, cls) @@ -646,7 +659,6 @@ # XXX write more class AppTestWithMapDict(object): - spaceconfig = {"objspace.std.withmapdict": True} def test_simple(self): class A(object): @@ -863,8 +875,7 @@ class AppTestWithMapDictAndCounters(object): - spaceconfig = {"objspace.std.withmapdict": True, - "objspace.std.withmethodcachecounter": True} + spaceconfig = {"objspace.std.withmethodcachecounter": True} def setup_class(cls): from pypy.interpreter import gateway @@ -1207,8 +1218,7 @@ assert got == 'd' class AppTestGlobalCaching(AppTestWithMapDict): - spaceconfig = {"objspace.std.withmethodcachecounter": True, - "objspace.std.withmapdict": True} + spaceconfig = {"objspace.std.withmethodcachecounter": True} def test_mix_classes(self): import __pypy__ @@ -1265,8 +1275,7 @@ assert 0, "failed: got %r" % ([got[1] for got in seen],) class TestDictSubclassShortcutBug(object): - spaceconfig = {"objspace.std.withmapdict": True, - "objspace.std.withmethodcachecounter": True} + spaceconfig = {"objspace.std.withmethodcachecounter": True} def test_bug(self): w_dict = self.space.appexec([], """(): diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -202,7 +202,8 @@ l = [type.__getattribute__(A, "__new__")(A)] * 10 
__pypy__.reset_method_cache_counter() for i, a in enumerate(l): - assert a.f() == 42 + # use getattr to circumvent the mapdict cache + assert getattr(a, "f")() == 42 cache_counter = __pypy__.method_cache_counter("f") assert sum(cache_counter) == 10 if cache_counter == (9, 1): @@ -225,9 +226,11 @@ assert a.x == i + 1 A.x += 1 cache_counter = __pypy__.method_cache_counter("x") - assert cache_counter[0] >= 350 + # XXX this is the bad case for the mapdict cache: looking up + # non-method attributes from the class + assert cache_counter[0] >= 450 assert cache_counter[1] >= 1 - assert sum(cache_counter) == 400 + assert sum(cache_counter) == 500 __pypy__.reset_method_cache_counter() a = A() diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -1,3 +1,4 @@ +import py from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef @@ -1105,7 +1106,6 @@ class AppTestGetattributeShortcut: - spaceconfig = {"objspace.std.getattributeshortcut": True} def test_reset_logic(self): class X(object): @@ -1239,3 +1239,57 @@ class Y: __metaclass__ = X assert (Y < Y) is True + + +class AppTestComparesByIdentity: + + def setup_class(cls): + if cls.runappdirect: + py.test.skip("interp2app doesn't work on appdirect") + + def compares_by_identity(space, w_cls): + return space.wrap(w_cls.compares_by_identity()) + cls.w_compares_by_identity = cls.space.wrap(interp2app(compares_by_identity)) + + def test_compares_by_identity(self): + class Plain(object): + pass + + class CustomEq(object): + def __eq__(self, other): + return True + + class CustomCmp (object): + def __cmp__(self, other): + return 0 + + class CustomHash(object): + def __hash__(self): + return 0 + + class TypeSubclass(type): + pass + + class TypeSubclassCustomCmp(type): + def __cmp__(self, other): + return 0 + + 
assert self.compares_by_identity(Plain) + assert not self.compares_by_identity(CustomEq) + assert not self.compares_by_identity(CustomCmp) + assert not self.compares_by_identity(CustomHash) + assert self.compares_by_identity(type) + assert self.compares_by_identity(TypeSubclass) + assert not self.compares_by_identity(TypeSubclassCustomCmp) + + def test_modify_class(self): + class X(object): + pass + + assert self.compares_by_identity(X) + X.__eq__ = lambda x: None + assert not self.compares_by_identity(X) + del X.__eq__ + assert self.compares_by_identity(X) + + diff --git a/pypy/objspace/std/test/test_userobject.py b/pypy/objspace/std/test/test_userobject.py --- a/pypy/objspace/std/test/test_userobject.py +++ b/pypy/objspace/std/test/test_userobject.py @@ -273,13 +273,3 @@ i += 1 -class AppTestWithGetAttributeShortcut(AppTestUserObject): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - -class AppTestDescriptorWithGetAttributeShortcut( - test_descriptor.AppTest_Descriptor): - # for the individual tests see - # ====> ../../test/test_descriptor.py - - spaceconfig = {"objspace.std.getattributeshortcut": True} diff --git a/pypy/objspace/std/test/test_versionedtype.py b/pypy/objspace/std/test/test_versionedtype.py --- a/pypy/objspace/std/test/test_versionedtype.py +++ b/pypy/objspace/std/test/test_versionedtype.py @@ -1,7 +1,6 @@ from pypy.objspace.std.test import test_typeobject class TestVersionedType(test_typeobject.TestTypeObject): - spaceconfig = {"objspace.std.withtypeversion": True} def get_three_classes(self): space = self.space @@ -261,6 +260,3 @@ -class AppTestVersionedType(test_typeobject.AppTestTypeObject): - spaceconfig = {"objspace.std.withtypeversion": True} - diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1,3 +1,4 @@ +import weakref from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root, SpaceCache 
from pypy.interpreter.error import oefmt, OperationError @@ -9,6 +10,7 @@ from rpython.rlib.jit import (promote, elidable_promote, we_are_jitted, elidable, dont_look_inside, unroll_safe) from rpython.rlib.objectmodel import current_object_addr_as_int, compute_hash +from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint class MutableCell(W_Root): @@ -38,9 +40,8 @@ def unwrap_cell(space, w_value): - if space.config.objspace.std.withtypeversion: - if isinstance(w_value, MutableCell): - return w_value.unwrap_cell(space) + if isinstance(w_value, MutableCell): + return w_value.unwrap_cell(space) return w_value def write_cell(space, w_cell, w_value): @@ -69,7 +70,6 @@ class MethodCache(object): def __init__(self, space): - assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.versions = [None] * SIZE self.names = [None] * SIZE @@ -87,6 +87,10 @@ for i in range(len(self.lookup_where)): self.lookup_where[i] = None_None +class _Global(object): + weakref_warning_printed = False +_global = _Global() + class Layout(object): From pypy.commits at gmail.com Tue Apr 26 15:47:51 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 26 Apr 2016 12:47:51 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: skip 'wishlist' test Message-ID: <571fc5e7.85661c0a.873b8.ffffc32f@mx.google.com> Author: mattip Branch: cpyext-for-merge Changeset: r83919:e73965d016f4 Date: 2016-04-26 22:46 +0300 http://bitbucket.org/pypy/pypy/changeset/e73965d016f4/ Log: skip 'wishlist' test diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -122,8 +122,9 @@ PyBuffer_Release(&buf); return result; ''') + assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') + skip('PyByteArrayObject not implemented yet') assert 'foo\0bar\0baz' == 
pybuffer(bytearray('foo\0bar\0baz')) - assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') def test_pyarg_parse_string_old_buffer(self): From pypy.commits at gmail.com Tue Apr 26 16:40:59 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 13:40:59 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: Must capture 'metainterp_sd.all_descrs' really at the end of Message-ID: <571fd25b.508e1c0a.3593.ffffa4d7@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83920:9e7fd5b9e987 Date: 2016-04-26 22:40 +0200 http://bitbucket.org/pypy/pypy/changeset/9e7fd5b9e987/ Log: Must capture 'metainterp_sd.all_descrs' really at the end of initialization. The previous logic would miss at least one descr, 'clear_vable_descr', made by VirtualizableInfo.finish(). diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1809,7 +1809,6 @@ self._addr2name_values = [value for key, value in list_of_addr2name] def finish_setup(self, codewriter, optimizer=None): - from rpython.jit.codewriter import effectinfo from rpython.jit.metainterp.blackhole import BlackholeInterpBuilder self.blackholeinterpbuilder = BlackholeInterpBuilder(codewriter, self) # @@ -1839,6 +1838,9 @@ self.cpu.propagate_exception_descr = exc_descr # self.globaldata = MetaInterpGlobalData(self) + + def finish_setup_descrs(self): + from rpython.jit.codewriter import effectinfo self.all_descrs = self.cpu.setup_descrs() effectinfo.compute_bitstrings(self.all_descrs) diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -132,6 +132,7 @@ metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) stats.metainterp_sd = metainterp_sd metainterp_sd.finish_setup(cw) + metainterp_sd.finish_setup_descrs() [jitdriver_sd] = metainterp_sd.jitdrivers_sd metainterp = 
pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -277,6 +277,7 @@ for vinfo in vinfos: if vinfo is not None: vinfo.finish() + self.metainterp_sd.finish_setup_descrs() if self.cpu.translate_support_code: self.annhelper.finish() From pypy.commits at gmail.com Tue Apr 26 17:04:54 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 14:04:54 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: Make sure we don't build new EffectInfo instances after Message-ID: <571fd7f6.0976c20a.9748d.3006@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83921:36636c9ac7e4 Date: 2016-04-26 23:01 +0200 http://bitbucket.org/pypy/pypy/changeset/36636c9ac7e4/ Log: Make sure we don't build new EffectInfo instances after compute_bitstrings() is called (it would fail translation if we do) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4577,6 +4577,13 @@ with py.test.raises(AnnotatorError): a.build_types(f, [float]) + def test_Ellipsis_not_rpython(self): + def f(): + return Ellipsis + a = self.RPythonAnnotator() + e = py.test.raises(Exception, a.build_types, f, []) + assert str(e.value) == "Don't know how to represent Ellipsis" + def g(n): return [0, 1, 2, n] diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -167,6 +167,16 @@ result._write_descrs_fields = write_descrs_fields result._write_descrs_arrays = write_descrs_arrays result._write_descrs_interiorfields = write_descrs_interiorfields + # initialized later, in compute_bitstrings() + # (the goal of this is to make sure we don't build new EffectInfo + # instances after 
compute_bitstrings() is called) + result.bitstring_readonly_descrs_fields = Ellipsis + result.bitstring_readonly_descrs_arrays = Ellipsis + result.bitstring_readonly_descrs_interiorfields = Ellipsis + result.bitstring_write_descrs_fields = Ellipsis + result.bitstring_write_descrs_arrays = Ellipsis + result.bitstring_write_descrs_interiorfields = Ellipsis + # result.extraeffect = extraeffect result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex From pypy.commits at gmail.com Tue Apr 26 17:12:11 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 14:12:11 -0700 (PDT) Subject: [pypy-commit] pypy default: Merged in devin.jeanpierre/pypy-macros (pull request #435) Message-ID: <571fd9ab.876cc20a.6f1dd.38de@mx.google.com> Author: Armin Rigo Branch: Changeset: r83927:5a49cbc33223 Date: 2016-04-26 23:11 +0200 http://bitbucket.org/pypy/pypy/changeset/5a49cbc33223/ Log: Merged in devin.jeanpierre/pypy-macros (pull request #435) Implement PyList_SET_ITEM with CPython's behavior, instead of SetItem's. 
diff --git a/pypy/module/cpyext/include/listobject.h b/pypy/module/cpyext/include/listobject.h --- a/pypy/module/cpyext/include/listobject.h +++ b/pypy/module/cpyext/include/listobject.h @@ -1,2 +1,1 @@ #define PyList_GET_ITEM PyList_GetItem -#define PyList_SET_ITEM PyList_SetItem diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -3,7 +3,7 @@ from pypy.module.cpyext.api import (cpython_api, CANNOT_FAIL, Py_ssize_t, build_type_checkers) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall -from pypy.module.cpyext.pyobject import Py_DecRef, PyObject +from pypy.module.cpyext.pyobject import Py_DecRef, PyObject, make_ref from pypy.objspace.std.listobject import W_ListObject from pypy.interpreter.error import OperationError @@ -21,6 +21,25 @@ """ return space.newlist([None] * len) + at cpython_api([PyObject, Py_ssize_t, PyObject], PyObject, error=CANNOT_FAIL, + result_borrowed=True) +def PyList_SET_ITEM(space, w_list, index, w_item): + """Macro form of PyList_SetItem() without error checking. This is normally + only used to fill in new lists where there is no previous content. + + This function "steals" a reference to item, and, unlike PyList_SetItem(), + does not discard a reference to any item that it being replaced; any + reference in list at position i will be leaked. + """ + assert isinstance(w_list, W_ListObject) + assert 0 <= index < w_list.length + # Deliberately leak, so that it can be safely decref'd. + make_ref(space, w_list.getitem(index)) + Py_DecRef(space, w_item) + w_list.setitem(index, w_item) + return w_item + + @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) def PyList_SetItem(space, w_list, index, w_item): """Set the item at index index in list to item. 
Return 0 on success diff --git a/pypy/module/cpyext/test/test_listobject.py b/pypy/module/cpyext/test/test_listobject.py --- a/pypy/module/cpyext/test/test_listobject.py +++ b/pypy/module/cpyext/test/test_listobject.py @@ -136,3 +136,45 @@ l = [1, 2, 3] module.setlistitem(l,0) assert l == [None, 2, 3] + + def test_get_item_macro(self): + module = self.import_extension('foo', [ + ("test_get_item", "METH_NOARGS", + """ + PyObject* o = PyList_New(1); + + PyObject* o2 = PyInt_FromLong(0); + PyList_SET_ITEM(o, 0, o2); + o2 = NULL; + + PyObject* o3 = PyList_GET_ITEM(o, 0); + Py_INCREF(o3); + Py_CLEAR(o); + return o3; + """)]) + assert module.test_get_item() == 0 + + def test_set_item_macro(self): + """PyList_SET_ITEM leaks a reference to the target.""" + module = self.import_extension('foo', [ + ("test_refcount_diff_after_setitem", "METH_NOARGS", + """ + PyObject* o = PyList_New(0); + PyObject* o2 = PyList_New(0); + + PyList_Append(o, o2); // does not steal o2 + + Py_ssize_t refcount = Py_REFCNT(o2); + + // Steal a reference to o2, but leak the old reference to o2. + // The net result should be no change in refcount. + PyList_SET_ITEM(o, 0, o2); + + Py_ssize_t new_refcount = Py_REFCNT(o2); + + Py_CLEAR(o); + Py_DECREF(o2); // append incref'd. + // Py_CLEAR(o2); // naive implementation would fail here. + return PyLong_FromSsize_t(new_refcount - refcount); + """)]) + assert module.test_refcount_diff_after_setitem() == 0 From pypy.commits at gmail.com Tue Apr 26 17:12:25 2016 From: pypy.commits at gmail.com (devin.jeanpierre) Date: Tue, 26 Apr 2016 14:12:25 -0700 (PDT) Subject: [pypy-commit] pypy default: Correctly leak replaced object in PyList_SET_ITEM. Message-ID: <571fd9b9.ce9d1c0a.cb3dd.ffff97dd@mx.google.com> Author: Devin Jeanpierre Branch: Changeset: r83922:4c00f63f3fa2 Date: 2016-04-25 13:02 -0700 http://bitbucket.org/pypy/pypy/changeset/4c00f63f3fa2/ Log: Correctly leak replaced object in PyList_SET_ITEM. 
diff --git a/pypy/module/cpyext/include/listobject.h b/pypy/module/cpyext/include/listobject.h --- a/pypy/module/cpyext/include/listobject.h +++ b/pypy/module/cpyext/include/listobject.h @@ -1,2 +1,1 @@ -#define PyList_GET_ITEM PyList_GetItem #define PyList_SET_ITEM PyList_SetItem diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -3,7 +3,7 @@ from pypy.module.cpyext.api import (cpython_api, CANNOT_FAIL, Py_ssize_t, build_type_checkers) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall -from pypy.module.cpyext.pyobject import Py_DecRef, PyObject +from pypy.module.cpyext.pyobject import Py_DecRef, PyObject, make_ref from pypy.objspace.std.listobject import W_ListObject from pypy.interpreter.error import OperationError @@ -21,6 +21,24 @@ """ return space.newlist([None] * len) + at cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyList_SET_ITEM(space, w_list, index, w_item): + """Set the item at index index in list to item. Return 0 on success + or -1 on failure. + + This function "steals" a reference to item, and, unlike PyList_SetItem(), + does not discard a reference to any item that it being replaced; any + reference in list at position i will be leaked. + """ + assert isinstance(w_list, W_ListObject) + assert 0 <= index < w_list.length + Py_DecRef(space, w_item) + # Deliberately leak, so that it can be safely decref'd. + make_ref(space, w_list.getitem(index)) + w_list.setitem(index, w_item) + return 0 + + @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) def PyList_SetItem(space, w_list, index, w_item): """Set the item at index index in list to item. 
Return 0 on success diff --git a/pypy/module/cpyext/test/test_listobject.py b/pypy/module/cpyext/test/test_listobject.py --- a/pypy/module/cpyext/test/test_listobject.py +++ b/pypy/module/cpyext/test/test_listobject.py @@ -136,3 +136,28 @@ l = [1, 2, 3] module.setlistitem(l,0) assert l == [None, 2, 3] + + def test_set_item_macro(self): + """PyList_SET_ITEM leaks a reference to the target.""" + module = self.import_extension('foo', [ + ("test_refcount_diff_after_setitem", "METH_NOARGS", + """ + PyObject* o = PyList_New(0); + PyObject* o2 = PyList_New(0); + + PyList_Append(o, o2); // does not steal o2 + + Py_ssize_t refcount = Py_REFCNT(o2); + + // Steal a reference to o2, but leak the old reference to o2. + // The net result should be no change in refcount. + PyList_SET_ITEM(o, 0, o2); + + Py_ssize_t new_refcount = Py_REFCNT(o2); + + Py_CLEAR(o); + Py_DECREF(o2); // append incref'd. + // Py_CLEAR(o2); // naive implementation would fail here. + return PyLong_FromSsize_t(new_refcount - refcount); + """)]) + assert module.test_refcount_diff_after_setitem() == 0 From pypy.commits at gmail.com Tue Apr 26 17:12:27 2016 From: pypy.commits at gmail.com (devin.jeanpierre) Date: Tue, 26 Apr 2016 14:12:27 -0700 (PDT) Subject: [pypy-commit] pypy default: Move decref somewhere less worrisome. Message-ID: <571fd9bb.c30a1c0a.5c087.ffffb7c2@mx.google.com> Author: Devin Jeanpierre Branch: Changeset: r83923:bd19ad98a253 Date: 2016-04-25 13:29 -0700 http://bitbucket.org/pypy/pypy/changeset/bd19ad98a253/ Log: Move decref somewhere less worrisome. diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -32,9 +32,9 @@ """ assert isinstance(w_list, W_ListObject) assert 0 <= index < w_list.length - Py_DecRef(space, w_item) # Deliberately leak, so that it can be safely decref'd. 
make_ref(space, w_list.getitem(index)) + Py_DecRef(space, w_item) w_list.setitem(index, w_item) return 0 From pypy.commits at gmail.com Tue Apr 26 17:12:29 2016 From: pypy.commits at gmail.com (devin.jeanpierre) Date: Tue, 26 Apr 2016 14:12:29 -0700 (PDT) Subject: [pypy-commit] pypy default: Restore PyList_GET_ITEM. Oops! Message-ID: <571fd9bd.2413c30a.a9bf7.35a7@mx.google.com> Author: Devin Jeanpierre Branch: Changeset: r83924:901323c8c698 Date: 2016-04-26 10:07 -0700 http://bitbucket.org/pypy/pypy/changeset/901323c8c698/ Log: Restore PyList_GET_ITEM. Oops! diff --git a/pypy/module/cpyext/include/listobject.h b/pypy/module/cpyext/include/listobject.h --- a/pypy/module/cpyext/include/listobject.h +++ b/pypy/module/cpyext/include/listobject.h @@ -1,1 +1,1 @@ -#define PyList_SET_ITEM PyList_SetItem +#define PyList_GET_ITEM PyList_GetItem diff --git a/pypy/module/cpyext/test/test_listobject.py b/pypy/module/cpyext/test/test_listobject.py --- a/pypy/module/cpyext/test/test_listobject.py +++ b/pypy/module/cpyext/test/test_listobject.py @@ -137,6 +137,23 @@ module.setlistitem(l,0) assert l == [None, 2, 3] + def test_get_item_macro(self): + module = self.import_extension('foo', [ + ("test_get_item", "METH_NOARGS", + """ + PyObject* o = PyList_New(1); + + PyObject* o2 = PyInt_FromLong(0); + PyList_SET_ITEM(o, 0, o2); + o2 = NULL; + + PyObject* o3 = PyList_GET_ITEM(o, 0); + Py_INCREF(o3); + Py_CLEAR(o); + return o3; + """)]) + assert module.test_get_item() == 0 + def test_set_item_macro(self): """PyList_SET_ITEM leaks a reference to the target.""" module = self.import_extension('foo', [ From pypy.commits at gmail.com Tue Apr 26 17:12:31 2016 From: pypy.commits at gmail.com (devin.jeanpierre) Date: Tue, 26 Apr 2016 14:12:31 -0700 (PDT) Subject: [pypy-commit] pypy default: Correct docstring for PyList_SET_ITEM. 
Message-ID: <571fd9bf.de361c0a.143eb.ffffe553@mx.google.com> Author: Devin Jeanpierre Branch: Changeset: r83925:030e55e07c81 Date: 2016-04-26 10:50 -0700 http://bitbucket.org/pypy/pypy/changeset/030e55e07c81/ Log: Correct docstring for PyList_SET_ITEM. diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -23,8 +23,8 @@ @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=CANNOT_FAIL) def PyList_SET_ITEM(space, w_list, index, w_item): - """Set the item at index index in list to item. Return 0 on success - or -1 on failure. + """Macro form of PyList_SetItem() without error checking. This is normally + only used to fill in new lists where there is no previous content. This function "steals" a reference to item, and, unlike PyList_SetItem(), does not discard a reference to any item that it being replaced; any From pypy.commits at gmail.com Tue Apr 26 17:12:32 2016 From: pypy.commits at gmail.com (devin.jeanpierre) Date: Tue, 26 Apr 2016 14:12:32 -0700 (PDT) Subject: [pypy-commit] pypy default: Correct the return type and documentation of PyList_SET_ITEM. Message-ID: <571fd9c0.022ec20a.db2c8.378f@mx.google.com> Author: Devin Jeanpierre Branch: Changeset: r83926:25a54857f693 Date: 2016-04-26 10:53 -0700 http://bitbucket.org/pypy/pypy/changeset/25a54857f693/ Log: Correct the return type and documentation of PyList_SET_ITEM. diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -21,7 +21,8 @@ """ return space.newlist([None] * len) - at cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=CANNOT_FAIL) + at cpython_api([PyObject, Py_ssize_t, PyObject], PyObject, error=CANNOT_FAIL, + result_borrowed=True) def PyList_SET_ITEM(space, w_list, index, w_item): """Macro form of PyList_SetItem() without error checking. 
This is normally only used to fill in new lists where there is no previous content. @@ -36,7 +37,7 @@ make_ref(space, w_list.getitem(index)) Py_DecRef(space, w_item) w_list.setitem(index, w_item) - return 0 + return w_item @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) From pypy.commits at gmail.com Tue Apr 26 17:31:55 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 26 Apr 2016 14:31:55 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: tweak headers for new pypy_numpy.h Message-ID: <571fde4b.50301c0a.b22c8.ffffb4c2@mx.google.com> Author: mattip Branch: release-5.x Changeset: r83928:60dff1f57b81 Date: 2016-04-27 00:24 +0300 http://bitbucket.org/pypy/pypy/changeset/60dff1f57b81/ Log: tweak headers for new pypy_numpy.h diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h --- a/pypy/module/cpyext/include/numpy/__multiarray_api.h +++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h @@ -5,6 +5,7 @@ npy_bool obval; } PyBoolScalarObject; -#define import_array() -#define PyArray_New _PyArray_New +static int import_array(){}; +static int _import_array(){}; +static int _import_math(){}; diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -10,6 +10,7 @@ extern "C" { #endif +#include "pypy_numpy.h" #include "old_defines.h" #include "npy_common.h" #include "__multiarray_api.h" From pypy.commits at gmail.com Tue Apr 26 19:08:54 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 16:08:54 -0700 (PDT) Subject: [pypy-commit] pypy py3k: simplify sys_exc_info to return None for a cleared exception and reuse some of Message-ID: <571ff506.82b71c0a.b4f8c.fffff9d1@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83929:5c78522a3851 Date: 2016-04-25 17:34 -0700 
http://bitbucket.org/pypy/pypy/changeset/5c78522a3851/ Log: simplify sys_exc_info to return None for a cleared exception and reuse some of it in RAISE_VARARGS (grafted from c24bc2ff9f5d07e096456bd038b2198ede3558eb) diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -1,7 +1,6 @@ import sys from pypy.interpreter.error import OperationError, get_cleared_operation_error from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.objectmodel import specialize from rpython.rlib import jit TICK_COUNTER_STEP = 100 @@ -214,21 +213,18 @@ self._trace(frame, 'exception', None, operationerr) #operationerr.print_detailed_traceback(self.space) - @staticmethod - def last_operr(space, frame): - while frame: - last = frame.last_exception - if (last is not None and - (not frame.hide() or - last is get_cleared_operation_error(space))): - return last - frame = frame.f_backref() - return None + @jit.dont_look_inside + def sys_exc_info(self): + """Implements sys.exc_info(). + Return an OperationError instance or None. - def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!! - """Implements sys.exc_info(). - Return an OperationError instance or None.""" - return self.last_operr(self.space, self.gettopframe()) + Ignores exceptions within hidden frames unless for_hidden=True + is specified. + + # NOTE: the result is not the wrapped sys.exc_info() !!! 
+ + """ + return self.gettopframe()._exc_info_unroll(self.space) def set_sys_exc_info(self, operror): frame = self.gettopframe_nohidden() diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized, check_nonneg from rpython.rlib.jit import hint -from rpython.rlib.objectmodel import we_are_translated, instantiate +from rpython.rlib.objectmodel import instantiate, we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint from rpython.tool.pairtype import extendabletype @@ -12,7 +12,8 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.astcompiler import consts from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import ( + OperationError, get_cleared_operation_error, oefmt) from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.nestedscope import Cell from pypy.tool import stdlib_opcode @@ -870,6 +871,21 @@ return space.wrap(self.builtin is not space.builtin) return space.w_False + @jit.unroll_safe + def _exc_info_unroll(self, space): + """Return the most recent OperationError being handled in the + call stack + """ + frame = self + while frame: + last = frame.last_exception + if last is not None: + if last is get_cleared_operation_error(self.space): + break + if not frame.hide(): + return last + frame = frame.f_backref() + return None # ____________________________________________________________ diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -652,24 +652,18 @@ unroller = SContinueLoop(startofloop) return self.unrollstack_and_jump(unroller) - @jit.unroll_safe def RAISE_VARARGS(self, nbargs, next_instr): space = self.space if nbargs > 2: raise 
BytecodeCorruption("bad RAISE_VARARGS oparg") if nbargs == 0: - frame = self - while frame: - if frame.last_exception is not None: - operror = frame.last_exception - break - frame = frame.f_backref() - else: - raise OperationError(space.w_RuntimeError, - space.wrap("No active exception to reraise")) + last_operr = self._exc_info_unroll(space) + if last_operr is None: + raise oefmt(space.w_RuntimeError, + "No active exception to reraise") # re-raise, no new traceback obj will be attached - self.last_exception = operror - raise RaiseWithExplicitTraceback(operror) + self.last_exception = last_operr + raise RaiseWithExplicitTraceback(last_operr) if nbargs == 2: w_cause = self.popvalue() if space.exception_is_valid_obj_as_class_w(w_cause): From pypy.commits at gmail.com Tue Apr 26 19:08:56 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 16:08:56 -0700 (PDT) Subject: [pypy-commit] pypy py3k: fix Message-ID: <571ff508.58811c0a.5977b.303d@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83930:16d67272ece0 Date: 2016-04-25 18:23 -0700 http://bitbucket.org/pypy/pypy/changeset/16d67272ece0/ Log: fix diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -218,9 +218,6 @@ """Implements sys.exc_info(). Return an OperationError instance or None. - Ignores exceptions within hidden frames unless for_hidden=True - is specified. - # NOTE: the result is not the wrapped sys.exc_info() !!! """ diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -191,11 +191,12 @@ last_operr = None w_traceback = self.w_traceback if w_traceback is not None and isinstance(w_traceback, PyTraceback): - ec = space.getexecutioncontext() # search for __context__ beginning in the previous frame. 
A # __context__ from the top most frame would have already # been handled by OperationError.record_context - last_operr = ec.last_operr(space, w_traceback.frame.f_backref()) + frame = w_traceback.frame.f_backref() + if frame: + last_operr = frame._exc_info_unroll(space) if last_operr is None: # no __context__ return space.w_None From pypy.commits at gmail.com Tue Apr 26 19:08:58 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 16:08:58 -0700 (PDT) Subject: [pypy-commit] pypy py3k: issue1903: remove the lazy __context__ recording potentially getting the wrong Message-ID: <571ff50a.a1ccc20a.fd2a9.4c9c@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83931:baabf9008933 Date: 2016-04-26 16:07 -0700 http://bitbucket.org/pypy/pypy/changeset/baabf9008933/ Log: issue1903: remove the lazy __context__ recording potentially getting the wrong result we still defer the recording -- until OperationErrors raise their way back through the interpreter (instead of @ their instantiation). this + unrolling the stack walking doesn't seem to hurt us on the runnable bits of the Grand Unified Python benchmark suite diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -31,6 +31,7 @@ _w_value = None _application_traceback = None + _context_recorded = False w_cause = None def __init__(self, w_type, w_value, tb=None, w_cause=None): @@ -329,52 +330,37 @@ self._application_traceback = tb def record_context(self, space, frame): - """Record a __context__ for this exception from the current - frame if one exists. + """Record a __context__ for this exception if one exists, + searching from the current frame. 
+ """ + if self._context_recorded: + return + last = frame._exc_info_unroll(space) + try: + if last is not None: + self.normalize_exception(space) + w_value = self.get_w_value(space) + w_last = last.get_w_value(space) + if not space.is_w(w_value, w_last): + _break_context_cycle(space, w_value, w_last) + space.setattr(w_value, space.wrap('__context__'), w_last) + finally: + self._context_recorded = True - __context__ is otherwise lazily determined from the - traceback. However the current frame.last_exception must be - checked for a __context__ before this OperationError overwrites - it (making the previous last_exception unavailable later on). - """ - last_exception = frame.last_exception - if (last_exception is not None and not frame.hide() or - last_exception is get_cleared_operation_error(space)): - # normalize w_value so setup_context can check for cycles - self.normalize_exception(space) - w_value = self.get_w_value(space) - w_last = last_exception.get_w_value(space) - if not space.is_w(w_value, w_last): - w_context = setup_context(space, w_value, w_last, lazy=True) - space.setattr(w_value, space.wrap('__context__'), w_context) +def _break_context_cycle(space, w_value, w_context): + """Break reference cycles in the __context__ chain. -def setup_context(space, w_exc, w_last, lazy=False): - """Determine the __context__ for w_exc from w_last and break - reference cycles in the __context__ chain. + This is O(chain length) but context chains are usually very short """ - from pypy.module.exceptions.interp_exceptions import W_BaseException - if space.is_w(w_exc, w_last): - w_last = space.w_None - # w_last may also be space.w_None if from ClearedOpErr - if not space.is_w(w_last, space.w_None): - # Avoid reference cycles through the context chain. This is - # O(chain length) but context chains are usually very short. 
- w_obj = w_last - while True: - assert isinstance(w_obj, W_BaseException) - if lazy: - w_context = w_obj.w_context - else: - # triggers W_BaseException._setup_context - w_context = space.getattr(w_obj, space.wrap('__context__')) - if space.is_none(w_context): - break - if space.is_w(w_context, w_exc): - w_obj.w_context = space.w_None - break - w_obj = w_context - return w_last + while True: + w_next = space.getattr(w_context, space.wrap('__context__')) + if space.is_w(w_next, space.w_None): + break + if space.is_w(w_next, w_value): + space.setattr(w_context, space.wrap('__context__'), space.w_None) + break + w_context = w_next class ClearedOpErr: diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -73,6 +73,7 @@ try: next_instr = self.dispatch_bytecode(co_code, next_instr, ec) except OperationError, operr: + operr.record_context(self.space, self) next_instr = self.handle_operation_error(ec, operr) except RaiseWithExplicitTraceback, e: next_instr = self.handle_operation_error(ec, e.operr, diff --git a/pypy/interpreter/pytraceback.py b/pypy/interpreter/pytraceback.py --- a/pypy/interpreter/pytraceback.py +++ b/pypy/interpreter/pytraceback.py @@ -61,7 +61,6 @@ tb = operror.get_traceback() tb = PyTraceback(space, frame, last_instruction, tb) operror.set_traceback(tb) - operror.record_context(space, frame) def check_traceback(space, w_tb, msg): diff --git a/pypy/interpreter/test/test_raise.py b/pypy/interpreter/test/test_raise.py --- a/pypy/interpreter/test/test_raise.py +++ b/pypy/interpreter/test/test_raise.py @@ -387,8 +387,6 @@ except: func1() - @py.test.mark.xfail(reason="A somewhat contrived case that may burden the " - "JIT to fully support") def test_frame_spanning_cycle_broken(self): context = IndexError() def func(): @@ -399,7 +397,6 @@ raise context except Exception as e2: assert e2.__context__ is e1 - # XXX: assert e1.__context__ is None else: fail('No exception 
raised') @@ -419,6 +416,7 @@ except ValueError as exc: assert exc.__cause__ is None assert exc.__suppress_context__ is True + assert isinstance(exc.__context__, TypeError) exc.__suppress_context__ = False raise exc except ValueError as exc: @@ -428,6 +426,19 @@ assert isinstance(e.__context__, TypeError) """ + def test_context_in_builtin(self): + context = IndexError() + try: + try: + raise context + except: + compile('pass', 'foo', 'doh') + except ValueError as e: + assert e.__context__ is context + else: + fail('No exception raised') + + class AppTestTraceback: def test_raise_with___traceback__(self): diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -92,7 +92,7 @@ TypeDef, GetSetProperty, interp_attrproperty, descr_get_dict, descr_set_dict, descr_del_dict) from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError, oefmt, setup_context +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.pytraceback import PyTraceback, check_traceback from rpython.rlib import rwin32 @@ -179,28 +179,7 @@ self.suppress_context = True def descr_getcontext(self, space): - w_context = self.w_context - if w_context is None: - self.w_context = w_context = self._setup_context(space) - return w_context - - def _setup_context(self, space): - """Lazily determine __context__ from w_traceback""" - # XXX: w_traceback can be overwritten: it's not necessarily the - # authoratative traceback! - last_operr = None - w_traceback = self.w_traceback - if w_traceback is not None and isinstance(w_traceback, PyTraceback): - # search for __context__ beginning in the previous frame. 
A - # __context__ from the top most frame would have already - # been handled by OperationError.record_context - frame = w_traceback.frame.f_backref() - if frame: - last_operr = frame._exc_info_unroll(space) - if last_operr is None: - # no __context__ - return space.w_None - return setup_context(space, self, last_operr.get_w_value(space)) + return self.w_context def descr_setcontext(self, space, w_newcontext): if not (space.is_w(w_newcontext, space.w_None) or From pypy.commits at gmail.com Tue Apr 26 19:09:00 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 16:09:00 -0700 (PDT) Subject: [pypy-commit] pypy py3k: add a failing test from test_contextlib: we lack WHY_SILENCED Message-ID: <571ff50c.d3161c0a.eb71.ffffe442@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83932:d815525436b8 Date: 2016-04-26 16:07 -0700 http://bitbucket.org/pypy/pypy/changeset/d815525436b8/ Log: add a failing test from test_contextlib: we lack WHY_SILENCED diff --git a/pypy/interpreter/test/test_raise.py b/pypy/interpreter/test/test_raise.py --- a/pypy/interpreter/test/test_raise.py +++ b/pypy/interpreter/test/test_raise.py @@ -438,6 +438,30 @@ else: fail('No exception raised') + def test_context_with_suppressed(self): + # XXX: requires with statement's WHY_SILENCED + class RaiseExc: + def __init__(self, exc): + self.exc = exc + def __enter__(self): + return self + def __exit__(self, *exc_details): + raise self.exc + + class SuppressExc: + def __enter__(self): + return self + def __exit__(self, *exc_details): + return True + + try: + with RaiseExc(IndexError): + with SuppressExc(): + with RaiseExc(ValueError): + 1/0 + except IndexError as exc: + assert exc.__context__ is None + class AppTestTraceback: From pypy.commits at gmail.com Tue Apr 26 19:32:26 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 26 Apr 2016 16:32:26 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Start branch py3.5-raffael_t Message-ID: 
<571ffa8a.a272c20a.cb4c7.53ae@mx.google.com> Author: Ronan Lamy Branch: py3.5-raffael_t Changeset: r83933:45ca38f62861 Date: 2016-04-26 18:59 +0100 http://bitbucket.org/pypy/pypy/changeset/45ca38f62861/ Log: Start branch py3.5-raffael_t From pypy.commits at gmail.com Tue Apr 26 19:32:28 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:28 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Add @ (matmul) to Grammar and base methodtable, Switch used lib to 3.5 Message-ID: <571ffa8c.2179c20a.b24a7.58c7@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83934:aca4467e822e Date: 2016-03-21 16:47 +0100 http://bitbucket.org/pypy/pypy/changeset/aca4467e822e/ Log: Add @ (matmul) to Grammar and base methodtable, Switch used lib to 3.5 diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -826,7 +826,7 @@ from os.path import abspath, join, dirname as dn thisfile = abspath(__file__) root = dn(dn(dn(thisfile))) - return [join(root, 'lib-python', '3'), + return [join(root, 'lib-python', '3.5'), join(root, 'lib_pypy')] def pypy_resolvedirof(s): diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1875,6 +1875,7 @@ ('set', 'set', 3, ['__set__']), ('delete', 'delete', 2, ['__delete__']), ('userdel', 'del', 1, ['__del__']), + ('matmul', '@', 2, ['__matmul__', ' __rmatmul__']), ] ObjSpace.BuiltinModuleTable = [ diff --git a/pypy/interpreter/pyparser/data/Grammar3.5 b/pypy/interpreter/pyparser/data/Grammar3.5 new file mode 100644 --- /dev/null +++ b/pypy/interpreter/pyparser/data/Grammar3.5 @@ -0,0 +1,133 @@ +# Grammar for Python + +# Note: Changing the grammar specified in this file will most likely +# require corresponding changes in the parser module +# (../Modules/parsermodule.c). 
If you can't make the changes to +# that module yourself, please co-ordinate the required changes +# with someone who can; ask around on python-dev for help. Fred +# Drake will probably be listening there. + +# NOTE WELL: You should also follow all the steps listed in PEP 306, +# "How to Change Python's Grammar" + +# Start symbols for the grammar: +# single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; +# eval_input is the input for the eval() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! +single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: (NEWLINE | stmt)* ENDMARKER +eval_input: testlist NEWLINE* ENDMARKER + +decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef) +funcdef: 'def' NAME parameters ['->' test] ':' suite +parameters: '(' [typedargslist] ')' +typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' + ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]] + | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef) +tfpdef: NAME [':' test] +varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' + ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef) +vfpdef: NAME + +stmt: simple_stmt | compound_stmt +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | nonlocal_stmt | assert_stmt) +expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] +augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +# For normal assignments, additional restrictions enforced by the 
interpreter +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS +import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' NAME)* +global_stmt: 'global' NAME (',' NAME)* +nonlocal_stmt: 'nonlocal' NAME (',' NAME)* +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated +if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] +while_stmt: 'while' test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item (',' with_item)* ':' suite +with_item: test ['as' expr] +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test ['as' NAME]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +test: or_test ['if' or_test 'else' test] | lambdef +test_nocond: or_test | lambdef_nocond +lambdef: 'lambda' [varargslist] ':' test +lambdef_nocond: 'lambda' [varargslist] ':' test_nocond +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +# <> isn't actually a valid comparison operator in Python. 
It's here for the +# sake of a __future__ import described in PEP 401 +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'@'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: atom trailer* ['**' factor] +atom: ('(' [yield_expr|testlist_comp] ')' | + '[' [testlist_comp] ']' | + '{' [dictorsetmaker] '}' | + NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False') +testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) | + (test (comp_for | (',' test)* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: (argument ',')* (argument [','] + |'*' test (',' argument)* [',' '**' test] + |'**' test) +# The reason that keywords are test nodes instead of NAME is that using NAME +# results in an ambiguity. ast.c makes sure it's a NAME. 
+argument: test [comp_for] | test '=' test # Really [keyword '='] test +comp_iter: comp_for | comp_if +comp_for: 'for' exprlist 'in' or_test [comp_iter] +comp_if: 'if' test_nocond [comp_iter] + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [yield_arg] +yield_arg: 'from' test | testlist From pypy.commits at gmail.com Tue Apr 26 19:32:30 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:30 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Add complete Grammar 3.5 Message-ID: <571ffa8e.d5da1c0a.89c17.ffffdad8@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83935:990e24c743e9 Date: 2016-03-21 17:38 +0100 http://bitbucket.org/pypy/pypy/changeset/990e24c743e9/ Log: Add complete Grammar 3.5 diff --git a/pypy/interpreter/pyparser/data/Grammar3.5 b/pypy/interpreter/pyparser/data/Grammar3.5 --- a/pypy/interpreter/pyparser/data/Grammar3.5 +++ b/pypy/interpreter/pyparser/data/Grammar3.5 @@ -7,8 +7,8 @@ # with someone who can; ask around on python-dev for help. Fred # Drake will probably be listening there. 
-# NOTE WELL: You should also follow all the steps listed in PEP 306, -# "How to Change Python's Grammar" +# NOTE WELL: You should also follow all the steps listed at +# https://docs.python.org/devguide/grammar.html # Start symbols for the grammar: # single_input is a single interactive statement; @@ -21,8 +21,11 @@ decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE decorators: decorator+ -decorated: decorators (classdef | funcdef) +decorated: decorators (classdef | funcdef | async_funcdef) + +async_funcdef: ASYNC funcdef funcdef: 'def' NAME parameters ['->' test] ':' suite + parameters: '(' [typedargslist] ')' typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]] @@ -65,7 +68,8 @@ nonlocal_stmt: 'nonlocal' NAME (',' NAME)* assert_stmt: 'assert' test [',' test] -compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: ASYNC (funcdef | with_stmt | for_stmt) if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] while_stmt: 'while' test ':' suite ['else' ':' suite] for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] @@ -89,7 +93,7 @@ not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* # <> isn't actually a valid comparison operator in Python. 
It's here for the -# sake of a __future__ import described in PEP 401 +# sake of a __future__ import described in PEP 401 (which really works :-) comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' star_expr: '*' expr expr: xor_expr ('|' xor_expr)* @@ -99,7 +103,8 @@ arith_expr: term (('+'|'-') term)* term: factor (('*'|'@'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power -power: atom trailer* ['**' factor] +power: atom_expr ['**' factor] +atom_expr: [AWAIT] atom trailer* atom: ('(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictorsetmaker] '}' | @@ -111,17 +116,29 @@ sliceop: ':' [test] exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] testlist: test (',' test)* [','] -dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) | - (test (comp_for | (',' test)* [','])) ) +dictorsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test | star_expr) + (comp_for | (',' (test | star_expr))* [','])) ) classdef: 'class' NAME ['(' [arglist] ')'] ':' suite -arglist: (argument ',')* (argument [','] - |'*' test (',' argument)* [',' '**' test] - |'**' test) +arglist: argument (',' argument)* [','] + # The reason that keywords are test nodes instead of NAME is that using NAME # results in an ambiguity. ast.c makes sure it's a NAME. -argument: test [comp_for] | test '=' test # Really [keyword '='] test +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: +# multiple (test comp_for) arguements are blocked; keyword unpackings +# that precede iterable unpackings are blocked; etc. 
+argument: ( test [comp_for] | + test '=' test | + '**' test | + '*' test ) + comp_iter: comp_for | comp_if comp_for: 'for' exprlist 'in' or_test [comp_iter] comp_if: 'if' test_nocond [comp_iter] From pypy.commits at gmail.com Tue Apr 26 19:32:32 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:32 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Add simple mmult (@) test Message-ID: <571ffa90.cb9a1c0a.725e5.2a83@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83936:fa2980eb159a Date: 2016-03-21 18:45 +0100 http://bitbucket.org/pypy/pypy/changeset/fa2980eb159a/ Log: Add simple mmult (@) test diff --git a/pypy/interpreter/test/test_35_mmult.py b/pypy/interpreter/test/test_35_mmult.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_35_mmult.py @@ -0,0 +1,8 @@ +import numpy + +x = numpy.ones(3) +m = numpy.eye(3) + +a = x @ m + +print(a) \ No newline at end of file From pypy.commits at gmail.com Tue Apr 26 19:32:34 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:34 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: AST um @ erweitert Message-ID: <571ffa92.8bd31c0a.5f55a.ffffe457@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83937:6b5719bb947f Date: 2016-03-21 19:16 +0100 http://bitbucket.org/pypy/pypy/changeset/6b5719bb947f/ Log: AST um @ erweitert diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -3046,6 +3046,7 @@ BitXor = 10 BitAnd = 11 FloorDiv = 12 +MatMult = 13 operator_to_class = [ _Add, diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -17,6 +17,7 @@ '/=' : ast.Div, '//=' : ast.FloorDiv, '%=' : ast.Mod, + 
'@=' : ast.MatMult, '<<=' : ast.LShift, '>>=' : ast.RShift, '&=' : ast.BitAnd, @@ -37,7 +38,8 @@ tokens.STAR : ast.Mult, tokens.SLASH : ast.Div, tokens.DOUBLESLASH : ast.FloorDiv, - tokens.PERCENT : ast.Mod + tokens.PERCENT : ast.Mod, + tokens.AT : ast.MatMult }) From pypy.commits at gmail.com Tue Apr 26 19:32:36 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:36 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Add @= token, Set Grammar3.5 Message-ID: <571ffa94.a82cc20a.faa2b.5653@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83938:d253dd93b42f Date: 2016-03-21 22:23 +0100 http://bitbucket.org/pypy/pypy/changeset/d253dd93b42f/ Log: Add @= token, Set Grammar3.5 diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1876,6 +1876,7 @@ ('delete', 'delete', 2, ['__delete__']), ('userdel', 'del', 1, ['__del__']), ('matmul', '@', 2, ['__matmul__', ' __rmatmul__']), + ('inplace_matmul', '@=', 2, ['__imatmul__']), ] ObjSpace.BuiltinModuleTable = [ diff --git a/pypy/interpreter/pyparser/data/Grammar3.5 b/pypy/interpreter/pyparser/data/Grammar3.5 --- a/pypy/interpreter/pyparser/data/Grammar3.5 +++ b/pypy/interpreter/pyparser/data/Grammar3.5 @@ -21,9 +21,10 @@ decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE decorators: decorator+ -decorated: decorators (classdef | funcdef | async_funcdef) +decorated: decorators (classdef | funcdef) +# | async_funcdef) -async_funcdef: ASYNC funcdef +# async_funcdef: ASYNC funcdef funcdef: 'def' NAME parameters ['->' test] ':' suite parameters: '(' [typedargslist] ')' @@ -68,8 +69,9 @@ nonlocal_stmt: 'nonlocal' NAME (',' NAME)* assert_stmt: 'assert' test [',' test] -compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt -async_stmt: ASYNC (funcdef | with_stmt | for_stmt) 
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated +# | async_stmt +# async_stmt: ASYNC (funcdef | with_stmt | for_stmt) if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] while_stmt: 'while' test ':' suite ['else' ':' suite] for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] @@ -103,8 +105,9 @@ arith_expr: term (('+'|'-') term)* term: factor (('*'|'@'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power -power: atom_expr ['**' factor] -atom_expr: [AWAIT] atom trailer* +# power: atom_expr ['**' factor] +power: atom trailer* ['**' factor] +# atom_expr: [AWAIT] atom trailer* atom: ('(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictorsetmaker] '}' | diff --git a/pypy/interpreter/pyparser/pygram.py b/pypy/interpreter/pyparser/pygram.py --- a/pypy/interpreter/pyparser/pygram.py +++ b/pypy/interpreter/pyparser/pygram.py @@ -9,7 +9,7 @@ def _get_python_grammar(): here = os.path.dirname(__file__) - fp = open(os.path.join(here, "data", "Grammar3.3")) + fp = open(os.path.join(here, "data", "Grammar3.5")) try: gram_source = fp.read() finally: diff --git a/pypy/interpreter/pyparser/pytoken.py b/pypy/interpreter/pyparser/pytoken.py --- a/pypy/interpreter/pyparser/pytoken.py +++ b/pypy/interpreter/pyparser/pytoken.py @@ -61,6 +61,7 @@ _add_tok('DOUBLESLASH', "//" ) _add_tok('DOUBLESLASHEQUAL',"//=" ) _add_tok('AT', "@" ) +_add_tok('ATEQUAL', "@=" ) _add_tok('RARROW', "->") _add_tok('ELLIPSIS', "...") _add_tok('OP') From pypy.commits at gmail.com Tue Apr 26 19:32:37 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:37 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Write matmul method as binary function in ast Message-ID: <571ffa95.85661c0a.873b8.0271@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83939:dc70c88997e4 Date: 2016-03-21 23:01 +0100 
http://bitbucket.org/pypy/pypy/changeset/dc70c88997e4/ Log: Write matmul method as binary function in ast diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -567,6 +567,7 @@ ops.BINARY_SUBSCR: -1, ops.BINARY_FLOOR_DIVIDE: -1, ops.BINARY_TRUE_DIVIDE: -1, + ops.BINARY_MAT_MUL: -1, ops.BINARY_LSHIFT: -1, ops.BINARY_RSHIFT: -1, ops.BINARY_AND: -1, diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -2970,6 +2970,8 @@ return 11 if space.isinstance_w(w_node, get(space).w_FloorDiv): return 12 + if space.isinstance_w(w_node, get(space).w_MatMul): + return 13 raise oefmt(space.w_TypeError, "Expected operator node, got %T", w_node) State.ast_type('operator', 'AST', None) @@ -3034,6 +3036,11 @@ return space.call_function(get(space).w_FloorDiv) State.ast_type('FloorDiv', 'operator', None) +class _MatMul(operator): + def to_object(self, space): + return space.call_function(get(space).w_MatMul) +State.ast_type('MatMul', 'operator', None) + Add = 1 Sub = 2 Mult = 3 @@ -3046,7 +3053,7 @@ BitXor = 10 BitAnd = 11 FloorDiv = 12 -MatMult = 13 +MatMul = 13 operator_to_class = [ _Add, @@ -3061,6 +3068,7 @@ _BitXor, _BitAnd, _FloorDiv, + _MatMul, ] class unaryop(AST): diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -17,7 +17,7 @@ '/=' : ast.Div, '//=' : ast.FloorDiv, '%=' : ast.Mod, - '@=' : ast.MatMult, + '@=' : ast.MatMul, '<<=' : ast.LShift, '>>=' : ast.RShift, '&=' : ast.BitAnd, @@ -39,7 +39,7 @@ tokens.SLASH : ast.Div, tokens.DOUBLESLASH : ast.FloorDiv, tokens.PERCENT : ast.Mod, - tokens.AT : ast.MatMult + tokens.AT : ast.MatMul }) diff --git 
a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -65,7 +65,8 @@ ast.BitOr: ops.BINARY_OR, ast.BitAnd: ops.BINARY_AND, ast.BitXor: ops.BINARY_XOR, - ast.FloorDiv: ops.BINARY_FLOOR_DIVIDE + ast.FloorDiv: ops.BINARY_FLOOR_DIVIDE, + ast.MatMul: ops.BINARY_MAT_MUL }) inplace_operations = misc.dict_to_switch({ @@ -80,7 +81,8 @@ ast.BitOr: ops.INPLACE_OR, ast.BitAnd: ops.INPLACE_AND, ast.BitXor: ops.INPLACE_XOR, - ast.FloorDiv: ops.INPLACE_FLOOR_DIVIDE + ast.FloorDiv: ops.INPLACE_FLOOR_DIVIDE, + ast.MatMul: ops.INPLACE_MAT_MUL }) compare_operations = misc.dict_to_switch({ diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py --- a/pypy/interpreter/astcompiler/optimize.py +++ b/pypy/interpreter/astcompiler/optimize.py @@ -134,6 +134,7 @@ ast.BitOr : _binary_fold("or_"), ast.BitXor : _binary_fold("xor"), ast.BitAnd : _binary_fold("and_"), + ast.MatMul : _binary_fold("matmul"), } unrolling_binary_folders = unrolling_iterable(binary_folders.items()) diff --git a/pypy/interpreter/astcompiler/tools/Python.asdl b/pypy/interpreter/astcompiler/tools/Python.asdl --- a/pypy/interpreter/astcompiler/tools/Python.asdl +++ b/pypy/interpreter/astcompiler/tools/Python.asdl @@ -95,7 +95,7 @@ boolop = And | Or operator = Add | Sub | Mult | Div | Mod | Pow | LShift - | RShift | BitOr | BitXor | BitAnd | FloorDiv + | RShift | BitOr | BitXor | BitAnd | FloorDiv | MatMul unaryop = Invert | Not | UAdd | USub diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -228,6 +228,8 @@ self.BINARY_AND(oparg, next_instr) elif opcode == opcodedesc.BINARY_FLOOR_DIVIDE.index: self.BINARY_FLOOR_DIVIDE(oparg, next_instr) + elif opcode == opcodedesc.BINARY_MAT_MUL.index: + self.BINARY_MAT_MUL(oparg, next_instr) elif opcode == 
opcodedesc.BINARY_LSHIFT.index: self.BINARY_LSHIFT(oparg, next_instr) elif opcode == opcodedesc.BINARY_MODULO.index: @@ -570,6 +572,7 @@ BINARY_MULTIPLY = binaryoperation("mul") BINARY_TRUE_DIVIDE = binaryoperation("truediv") BINARY_FLOOR_DIVIDE = binaryoperation("floordiv") + BINARY_MAT_MUL = binaryoperation("matmul") BINARY_DIVIDE = binaryoperation("div") # XXX BINARY_DIVIDE must fall back to BINARY_TRUE_DIVIDE with -Qnew BINARY_MODULO = binaryoperation("mod") @@ -591,6 +594,7 @@ INPLACE_MULTIPLY = binaryoperation("inplace_mul") INPLACE_TRUE_DIVIDE = binaryoperation("inplace_truediv") INPLACE_FLOOR_DIVIDE = binaryoperation("inplace_floordiv") + INPLACE_FLOOR_DIVIDE = binaryoperation("inplace_matmul") INPLACE_DIVIDE = binaryoperation("inplace_div") # XXX INPLACE_DIVIDE must fall back to INPLACE_TRUE_DIVIDE with -Qnew INPLACE_MODULO = binaryoperation("inplace_mod") From pypy.commits at gmail.com Tue Apr 26 19:32:39 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:39 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Change to right opcode for matmul Message-ID: <571ffa97.143f1c0a.ef9e7.fffffe2d@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83940:8b981defbc71 Date: 2016-03-21 23:29 +0100 http://bitbucket.org/pypy/pypy/changeset/8b981defbc71/ Log: Change to right opcode for matmul diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -567,7 +567,7 @@ ops.BINARY_SUBSCR: -1, ops.BINARY_FLOOR_DIVIDE: -1, ops.BINARY_TRUE_DIVIDE: -1, - ops.BINARY_MAT_MUL: -1, + ops.BINARY_MATRIX_MULTIPLY: -1, ops.BINARY_LSHIFT: -1, ops.BINARY_RSHIFT: -1, ops.BINARY_AND: -1, diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -228,8 +228,8 @@ self.BINARY_AND(oparg, next_instr) 
elif opcode == opcodedesc.BINARY_FLOOR_DIVIDE.index: self.BINARY_FLOOR_DIVIDE(oparg, next_instr) - elif opcode == opcodedesc.BINARY_MAT_MUL.index: - self.BINARY_MAT_MUL(oparg, next_instr) + elif opcode == opcodedesc.BINARY_MATRIX_MULTIPLY.index: + self.BINARY_MATRIX_MULTIPLY(oparg, next_instr) elif opcode == opcodedesc.BINARY_LSHIFT.index: self.BINARY_LSHIFT(oparg, next_instr) elif opcode == opcodedesc.BINARY_MODULO.index: @@ -572,7 +572,7 @@ BINARY_MULTIPLY = binaryoperation("mul") BINARY_TRUE_DIVIDE = binaryoperation("truediv") BINARY_FLOOR_DIVIDE = binaryoperation("floordiv") - BINARY_MAT_MUL = binaryoperation("matmul") + BINARY_MATRIX_MULTIPLY = binaryoperation("matmul") BINARY_DIVIDE = binaryoperation("div") # XXX BINARY_DIVIDE must fall back to BINARY_TRUE_DIVIDE with -Qnew BINARY_MODULO = binaryoperation("mod") From pypy.commits at gmail.com Tue Apr 26 19:32:41 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:41 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: (patch plan_rich) Set Python 3.5 as library for module, Comment unfinished ops cmds in astcompiler Message-ID: <571ffa99.aa5ec20a.71e38.535c@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83941:e8e88caf2dee Date: 2016-03-22 14:34 +0100 http://bitbucket.org/pypy/pypy/changeset/e8e88caf2dee/ Log: (patch plan_rich) Set Python 3.5 as library for module, Comment unfinished ops cmds in astcompiler diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -557,7 +557,7 @@ ops.LIST_APPEND: -1, ops.SET_ADD: -1, ops.MAP_ADD: -2, - ops.STORE_MAP: -2, + # XXX ops.STORE_MAP: -2, ops.BINARY_POWER: -1, ops.BINARY_MULTIPLY: -1, @@ -598,9 +598,9 @@ ops.PRINT_EXPR: -1, - ops.WITH_CLEANUP: -1, + # TODO ops.WITH_CLEANUP: -1, ops.LOAD_BUILD_CLASS: 1, - ops.STORE_LOCALS: -1, + # TODO ops.STORE_LOCALS: -1, 
ops.POP_BLOCK: 0, ops.POP_EXCEPT: -1, ops.END_FINALLY: -4, # assume always 4: we pretend that SETUP_FINALLY @@ -616,7 +616,7 @@ ops.BUILD_MAP: 1, ops.COMPARE_OP: -1, - ops.LOOKUP_METHOD: 1, + # TODO ops.LOOKUP_METHOD: 1, ops.LOAD_NAME: 1, ops.STORE_NAME: -1, @@ -652,9 +652,9 @@ ops.JUMP_IF_FALSE_OR_POP: 0, ops.POP_JUMP_IF_TRUE: -1, ops.POP_JUMP_IF_FALSE: -1, - ops.JUMP_IF_NOT_DEBUG: 0, + # TODO ops.JUMP_IF_NOT_DEBUG: 0, - ops.BUILD_LIST_FROM_ARG: 1, + # TODO ops.BUILD_LIST_FROM_ARG: 1, } diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -6,7 +6,7 @@ from pypy.interpreter import gateway #XXX # the release serial 42 is not in range(16) -CPYTHON_VERSION = (3, 3, 5, "final", 0) +CPYTHON_VERSION = (3, 5, 1, "final", 0) #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -275,6 +275,7 @@ descr_add, descr_radd = _abstract_binop('add') descr_sub, descr_rsub = _abstract_binop('sub') descr_mul, descr_rmul = _abstract_binop('mul') + descr_matmul, descr_rmatmul = _abstract_binop('matmul') descr_and, descr_rand = _abstract_binop('and') descr_or, descr_ror = _abstract_binop('or') diff --git a/pypy/tool/lib_pypy.py b/pypy/tool/lib_pypy.py --- a/pypy/tool/lib_pypy.py +++ b/pypy/tool/lib_pypy.py @@ -5,7 +5,7 @@ LIB_ROOT = py.path.local(pypy.__path__[0]).dirpath() LIB_PYPY = LIB_ROOT.join('lib_pypy') -LIB_PYTHON = LIB_ROOT.join('lib-python', '%d' % CPYTHON_VERSION[0]) +LIB_PYTHON = LIB_ROOT.join('lib-python', '%d.%d' % CPYTHON_VERSION[0:2]) def import_from_lib_pypy(modname): From pypy.commits at gmail.com Tue Apr 26 19:32:43 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:43 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Add 
missing opcodes to 3.5 lib, Fix error mat_mul -> matrix_multiply Message-ID: <571ffa9b.6614c20a.243bf.573a@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83942:20517040bd6a Date: 2016-03-22 16:07 +0100 http://bitbucket.org/pypy/pypy/changeset/20517040bd6a/ Log: Add missing opcodes to 3.5 lib, Fix error mat_mul -> matrix_multiply diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -66,7 +66,7 @@ ast.BitAnd: ops.BINARY_AND, ast.BitXor: ops.BINARY_XOR, ast.FloorDiv: ops.BINARY_FLOOR_DIVIDE, - ast.MatMul: ops.BINARY_MAT_MUL + ast.MatMul: ops.BINARY_MATRIX_MULTIPLY }) inplace_operations = misc.dict_to_switch({ @@ -82,7 +82,7 @@ ast.BitAnd: ops.INPLACE_AND, ast.BitXor: ops.INPLACE_XOR, ast.FloorDiv: ops.INPLACE_FLOOR_DIVIDE, - ast.MatMul: ops.INPLACE_MAT_MUL + ast.MatMul: ops.INPLACE_MATRIX_MULTIPLY }) compare_operations = misc.dict_to_switch({ From pypy.commits at gmail.com Tue Apr 26 19:32:44 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:44 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Define matmul method in module, Add to dict in objspace Message-ID: <571ffa9c.81da1c0a.6564a.022e@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83943:6eacb28b58d1 Date: 2016-03-22 18:22 +0100 http://bitbucket.org/pypy/pypy/changeset/6eacb28b58d1/ Log: Define matmul method in module, Add to dict in objspace diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py --- a/pypy/module/cpyext/number.py +++ b/pypy/module/cpyext/number.py @@ -89,6 +89,7 @@ ('Xor', 'xor'), ('Or', 'or_'), ('Divmod', 'divmod'), + ('MatrixMultiply', 'matmul') ]: make_numbermethod(name, spacemeth) if name != 'Divmod': diff --git a/pypy/module/operator/__init__.py b/pypy/module/operator/__init__.py --- a/pypy/module/operator/__init__.py 
+++ b/pypy/module/operator/__init__.py @@ -28,7 +28,7 @@ 'le', 'lshift', 'lt', 'mod', 'mul', 'ne', 'neg', 'not_', 'or_', 'pos', 'pow', 'rshift', 'setitem', - 'sub', 'truediv', 'truth', 'xor', + 'sub', 'truediv', 'matmul', 'truth', 'xor', 'iadd', 'iand', 'iconcat', 'ifloordiv', 'ilshift', 'imod', 'imul', 'ior', 'ipow', 'irshift', 'isub', 'itruediv', 'ixor', '_length_hint', @@ -72,6 +72,7 @@ '__sub__' : 'sub', '__truediv__' : 'truediv', '__xor__' : 'xor', + '__matmul__' : 'matmul', # in-place '__iadd__' : 'iadd', '__iand__' : 'iand', diff --git a/pypy/module/operator/interp_operator.py b/pypy/module/operator/interp_operator.py --- a/pypy/module/operator/interp_operator.py +++ b/pypy/module/operator/interp_operator.py @@ -143,6 +143,10 @@ 'xor(a, b) -- Same as a ^ b.' return space.xor(w_a, w_b) +def matmul(space, w_a, w_b): + 'matmul(a, b) -- Same as a @ b.' + return space.matmul(w_a, w_b) + # in-place operations def iadd(space, w_obj1, w_obj2): diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -15,7 +15,7 @@ BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', 'xor': '^'} BINARY_OPS = dict(add='+', div='/', floordiv='//', mod='%', mul='*', sub='-', - truediv='/', **BINARY_BITWISE_OPS) + truediv='/', matmul='@', **BINARY_BITWISE_OPS) COMMUTATIVE_OPS = ('add', 'mul', 'and', 'or', 'xor') From pypy.commits at gmail.com Tue Apr 26 19:32:47 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:47 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Clean up unused 3.5 opcodes, Define inplace_matmul operator Message-ID: <571ffa9f.2457c20a.4ec44.5e9e@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83944:e441185f38d2 Date: 2016-03-22 22:25 +0100 http://bitbucket.org/pypy/pypy/changeset/e441185f38d2/ Log: Clean up unused 3.5 opcodes, Define inplace_matmul operator diff --git 
a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -581,6 +581,7 @@ ops.INPLACE_MULTIPLY: -1, ops.INPLACE_MODULO: -1, ops.INPLACE_POWER: -1, + ops.INPLACE_MATRIX_MULTIPLY: -1, ops.INPLACE_LSHIFT: -1, ops.INPLACE_RSHIFT: -1, ops.INPLACE_AND: -1, diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -572,10 +572,10 @@ BINARY_MULTIPLY = binaryoperation("mul") BINARY_TRUE_DIVIDE = binaryoperation("truediv") BINARY_FLOOR_DIVIDE = binaryoperation("floordiv") - BINARY_MATRIX_MULTIPLY = binaryoperation("matmul") BINARY_DIVIDE = binaryoperation("div") # XXX BINARY_DIVIDE must fall back to BINARY_TRUE_DIVIDE with -Qnew BINARY_MODULO = binaryoperation("mod") + BINARY_MATRIX_MULTIPLY = binaryoperation("matmul") BINARY_ADD = binaryoperation("add") BINARY_SUBTRACT = binaryoperation("sub") BINARY_SUBSCR = binaryoperation("getitem") @@ -598,6 +598,7 @@ INPLACE_DIVIDE = binaryoperation("inplace_div") # XXX INPLACE_DIVIDE must fall back to INPLACE_TRUE_DIVIDE with -Qnew INPLACE_MODULO = binaryoperation("inplace_mod") + INPLACE_MATRIX_MULTIPLY = binaryoperation("inplace_matmul") INPLACE_ADD = binaryoperation("inplace_add") INPLACE_SUBTRACT = binaryoperation("inplace_sub") INPLACE_LSHIFT = binaryoperation("inplace_lshift") diff --git a/pypy/module/operator/interp_operator.py b/pypy/module/operator/interp_operator.py --- a/pypy/module/operator/interp_operator.py +++ b/pypy/module/operator/interp_operator.py @@ -197,6 +197,10 @@ 'ixor(a, b) -- Same as a ^= b.' return space.inplace_xor(w_a, w_b) +def imatmul(space, w_a, w_b): + 'imatmul(a, b) -- Same as a @= b.' + return space.inplace_matmul(w_a, w_b) + def iconcat(space, w_obj1, w_obj2): 'iconcat(a, b) -- Same as a += b, for a and b sequences.' 
if (space.lookup(w_obj1, '__getitem__') is None or diff --git a/pypy/tool/opcode3.py b/pypy/tool/opcode3.py --- a/pypy/tool/opcode3.py +++ b/pypy/tool/opcode3.py @@ -5,6 +5,7 @@ "Backported" from Python 3 to Python 2 land - an excact copy of lib-python/3/opcode.py """ + __all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs", "haslocal", "hascompare", "hasfree", "opname", "opmap", "HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"] From pypy.commits at gmail.com Tue Apr 26 19:32:49 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:49 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Remove undefined Grammar again, so that only @ is new (fix Assertionerror) Message-ID: <571ffaa1.d81a1c0a.16653.0271@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83945:a88ed4caa6dd Date: 2016-03-22 22:48 +0100 http://bitbucket.org/pypy/pypy/changeset/a88ed4caa6dd/ Log: Remove undefined Grammar again, so that only @ is new (fix Assertionerror) diff --git a/pypy/interpreter/pyparser/data/Grammar3.5 b/pypy/interpreter/pyparser/data/Grammar3.5 --- a/pypy/interpreter/pyparser/data/Grammar3.5 +++ b/pypy/interpreter/pyparser/data/Grammar3.5 @@ -119,14 +119,19 @@ sliceop: ':' [test] exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] testlist: test (',' test)* [','] -dictorsetmaker: ( ((test ':' test | '**' expr) - (comp_for | (',' (test ':' test | '**' expr))* [','])) | - ((test | star_expr) - (comp_for | (',' (test | star_expr))* [','])) ) +dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) | + (test (comp_for | (',' test)* [','])) ) +#dictorsetmaker: ( ((test ':' test | '**' expr) +# (comp_for | (',' (test ':' test | '**' expr))* [','])) | +# ((test | star_expr) +# (comp_for | (',' (test | star_expr))* [','])) ) classdef: 'class' NAME ['(' [arglist] ')'] ':' suite -arglist: argument (',' argument)* [','] +arglist: (argument ',')* (argument [','] + |'*' test (',' argument)* [',' 
'**' test] + |'**' test) +#arglist: argument (',' argument)* [','] # The reason that keywords are test nodes instead of NAME is that using NAME # results in an ambiguity. ast.c makes sure it's a NAME. @@ -137,10 +142,11 @@ # Illegal combinations and orderings are blocked in ast.c: # multiple (test comp_for) arguements are blocked; keyword unpackings # that precede iterable unpackings are blocked; etc. -argument: ( test [comp_for] | - test '=' test | - '**' test | - '*' test ) +argument: test [comp_for] | test '=' test # Really [keyword '='] test +#argument: ( test [comp_for] | +# test '=' test | +# '**' test | +# '*' test ) comp_iter: comp_for | comp_if comp_for: 'for' exprlist 'in' or_test [comp_iter] From pypy.commits at gmail.com Tue Apr 26 19:32:50 2016 From: pypy.commits at gmail.com (raff...@gmail.com) Date: Tue, 26 Apr 2016 16:32:50 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Uncomment opscode stacks Message-ID: <571ffaa2.de361c0a.143eb.0607@mx.google.com> Author: raffael.tfirst at gmail.com Branch: py3.5-raffael_t Changeset: r83946:a39518eb84c3 Date: 2016-03-22 23:40 +0100 http://bitbucket.org/pypy/pypy/changeset/a39518eb84c3/ Log: Uncomment opscode stacks diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -557,7 +557,8 @@ ops.LIST_APPEND: -1, ops.SET_ADD: -1, ops.MAP_ADD: -2, - # XXX ops.STORE_MAP: -2, + # XXX + ops.STORE_MAP: -2, ops.BINARY_POWER: -1, ops.BINARY_MULTIPLY: -1, @@ -599,9 +600,11 @@ ops.PRINT_EXPR: -1, - # TODO ops.WITH_CLEANUP: -1, + # TODO + ops.WITH_CLEANUP: -1, ops.LOAD_BUILD_CLASS: 1, - # TODO ops.STORE_LOCALS: -1, + # TODO + ops.STORE_LOCALS: -1, ops.POP_BLOCK: 0, ops.POP_EXCEPT: -1, ops.END_FINALLY: -4, # assume always 4: we pretend that SETUP_FINALLY @@ -617,7 +620,8 @@ ops.BUILD_MAP: 1, ops.COMPARE_OP: -1, - # TODO ops.LOOKUP_METHOD: 1, + # TODO + ops.LOOKUP_METHOD: 1, 
ops.LOAD_NAME: 1, ops.STORE_NAME: -1, @@ -653,9 +657,11 @@ ops.JUMP_IF_FALSE_OR_POP: 0, ops.POP_JUMP_IF_TRUE: -1, ops.POP_JUMP_IF_FALSE: -1, - # TODO ops.JUMP_IF_NOT_DEBUG: 0, + # TODO + ops.JUMP_IF_NOT_DEBUG: 0, - # TODO ops.BUILD_LIST_FROM_ARG: 1, + # TODO + ops.BUILD_LIST_FROM_ARG: 1, } From pypy.commits at gmail.com Tue Apr 26 19:32:52 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 26 Apr 2016 16:32:52 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Fix location of stdlib after rebasing Message-ID: <571ffaa4.0f801c0a.51e59.ffffcb2b@mx.google.com> Author: Ronan Lamy Branch: py3.5-raffael_t Changeset: r83947:5f1dc236f900 Date: 2016-04-26 19:21 +0100 http://bitbucket.org/pypy/pypy/changeset/5f1dc236f900/ Log: Fix location of stdlib after rebasing diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -826,7 +826,7 @@ from os.path import abspath, join, dirname as dn thisfile = abspath(__file__) root = dn(dn(dn(thisfile))) - return [join(root, 'lib-python', '3.5'), + return [join(root, 'lib-python', '3'), join(root, 'lib_pypy')] def pypy_resolvedirof(s): diff --git a/pypy/tool/lib_pypy.py b/pypy/tool/lib_pypy.py --- a/pypy/tool/lib_pypy.py +++ b/pypy/tool/lib_pypy.py @@ -5,7 +5,7 @@ LIB_ROOT = py.path.local(pypy.__path__[0]).dirpath() LIB_PYPY = LIB_ROOT.join('lib_pypy') -LIB_PYTHON = LIB_ROOT.join('lib-python', '%d.%d' % CPYTHON_VERSION[0:2]) +LIB_PYTHON = LIB_ROOT.join('lib-python', '%d' % CPYTHON_VERSION[0]) def import_from_lib_pypy(modname): From pypy.commits at gmail.com Tue Apr 26 19:32:54 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 26 Apr 2016 16:32:54 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: Reapply Raffael's changes to opcode.py Message-ID: <571ffaa6.442cc20a.ce956.5688@mx.google.com> Author: Ronan Lamy Branch: py3.5-raffael_t Changeset: r83948:38d59646e345 Date: 2016-04-26 19:28 +0100 
http://bitbucket.org/pypy/pypy/changeset/38d59646e345/ Log: Reapply Raffael's changes to opcode.py diff --git a/lib-python/3/opcode.py b/lib-python/3/opcode.py --- a/lib-python/3/opcode.py +++ b/lib-python/3/opcode.py @@ -85,10 +85,7 @@ def_op('INPLACE_FLOOR_DIVIDE', 28) def_op('INPLACE_TRUE_DIVIDE', 29) -def_op('GET_AITER', 50) -def_op('GET_ANEXT', 51) -def_op('BEFORE_ASYNC_WITH', 52) - +def_op('STORE_MAP', 54) def_op('INPLACE_ADD', 55) def_op('INPLACE_SUBTRACT', 56) def_op('INPLACE_MULTIPLY', 57) @@ -103,12 +100,11 @@ def_op('BINARY_OR', 66) def_op('INPLACE_POWER', 67) def_op('GET_ITER', 68) -def_op('GET_YIELD_FROM_ITER', 69) +def_op('STORE_LOCALS', 69) def_op('PRINT_EXPR', 70) def_op('LOAD_BUILD_CLASS', 71) def_op('YIELD_FROM', 72) -def_op('GET_AWAITABLE', 73) def_op('INPLACE_LSHIFT', 75) def_op('INPLACE_RSHIFT', 76) @@ -116,8 +112,7 @@ def_op('INPLACE_XOR', 78) def_op('INPLACE_OR', 79) def_op('BREAK_LOOP', 80) -def_op('WITH_CLEANUP_START', 81) -def_op('WITH_CLEANUP_FINISH', 82) +def_op('WITH_CLEANUP', 81) def_op('RETURN_VALUE', 83) def_op('IMPORT_STAR', 84) @@ -200,20 +195,9 @@ def_op('SET_ADD', 146) def_op('MAP_ADD', 147) -def_op('LOAD_CLASSDEREF', 148) -hasfree.append(148) - -jrel_op('SETUP_ASYNC_WITH', 154) - def_op('EXTENDED_ARG', 144) EXTENDED_ARG = 144 -def_op('BUILD_LIST_UNPACK', 149) -def_op('BUILD_MAP_UNPACK', 150) -def_op('BUILD_MAP_UNPACK_WITH_CALL', 151) -def_op('BUILD_TUPLE_UNPACK', 152) -def_op('BUILD_SET_UNPACK', 153) - # pypy modification, experimental bytecode def_op('LOOKUP_METHOD', 201) # Index in name list hasname.append(201) From pypy.commits at gmail.com Tue Apr 26 19:32:56 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 26 Apr 2016 16:32:56 -0700 (PDT) Subject: [pypy-commit] pypy py3.5-raffael_t: fix typo Message-ID: <571ffaa8.109a1c0a.e5588.fffffe41@mx.google.com> Author: Ronan Lamy Branch: py3.5-raffael_t Changeset: r83949:a31d7ae793ce Date: 2016-04-26 19:29 +0100 http://bitbucket.org/pypy/pypy/changeset/a31d7ae793ce/ Log: fix 
typo diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1875,7 +1875,7 @@ ('set', 'set', 3, ['__set__']), ('delete', 'delete', 2, ['__delete__']), ('userdel', 'del', 1, ['__del__']), - ('matmul', '@', 2, ['__matmul__', ' __rmatmul__']), + ('matmul', '@', 2, ['__matmul__', '__rmatmul__']), ('inplace_matmul', '@=', 2, ['__imatmul__']), ] From pypy.commits at gmail.com Tue Apr 26 23:09:46 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 20:09:46 -0700 (PDT) Subject: [pypy-commit] pypy py3k: fix exec from @hidden_applevel functions Message-ID: <57202d7a.c711c30a.91c1e.ffff8048@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83950:1921e21b5b4b Date: 2016-04-26 20:08 -0700 http://bitbucket.org/pypy/pypy/changeset/1921e21b5b4b/ Log: fix exec from @hidden_applevel functions diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -90,7 +90,7 @@ return code.exec_code(space, w_globals, w_locals) def exec_(space, w_prog, w_globals=None, w_locals=None): - frame = space.getexecutioncontext().gettopframe_nohidden() + frame = space.getexecutioncontext().gettopframe() frame.exec_(w_prog, w_globals, w_locals) def build_class(space, w_func, w_name, __args__): From pypy.commits at gmail.com Tue Apr 26 23:09:48 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 20:09:48 -0700 (PDT) Subject: [pypy-commit] pypy py3k: hide run_command_line now that exec works Message-ID: <57202d7c.2457c20a.4ec44.ffff855b@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83951:98c9d7225474 Date: 2016-04-26 20:08 -0700 http://bitbucket.org/pypy/pypy/changeset/98c9d7225474/ Log: hide run_command_line now that exec works diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- 
a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -511,6 +511,7 @@ def exec_(src, dic): exec(src, dic) + at hidden_applevel def run_command_line(interactive, inspect, run_command, From pypy.commits at gmail.com Tue Apr 26 23:09:49 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 20:09:49 -0700 (PDT) Subject: [pypy-commit] pypy py3k: globals deserves module=True Message-ID: <57202d7d.46291c0a.e3346.0399@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83952:ae66ec7f9ce7 Date: 2016-04-26 20:08 -0700 http://bitbucket.org/pypy/pypy/changeset/ae66ec7f9ce7/ Log: globals deserves module=True diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1629,7 +1629,7 @@ if caller is None: caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: - w_globals = space.newdict() + w_globals = space.newdict(module=True) if space.is_none(w_locals): w_locals = w_globals else: From pypy.commits at gmail.com Wed Apr 27 00:19:18 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 21:19:18 -0700 (PDT) Subject: [pypy-commit] pypy default: don't prepend '' more than once (not exactly sure how this happens but Message-ID: <57203dc6.85661c0a.873b8.3860@mx.google.com> Author: Philip Jenvey Branch: Changeset: r83953:c87392dd55fe Date: 2016-04-26 21:14 -0700 http://bitbucket.org/pypy/pypy/changeset/c87392dd55fe/ Log: don't prepend '' more than once (not exactly sure how this happens but it does) diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -162,7 +162,10 @@ # When translating PyPy, freeze the file name # /lastdirname/basename.py # instead of freezing the complete translation-time path. 
- filename = self.co_filename.lstrip('<').rstrip('>') + filename = self.co_filename + if filename.startswith(''): + return + filename = filename.lstrip('<').rstrip('>') if filename.lower().endswith('.pyc'): filename = filename[:-1] basename = os.path.basename(filename) From pypy.commits at gmail.com Wed Apr 27 00:19:20 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 21:19:20 -0700 (PDT) Subject: [pypy-commit] pypy py3k: don't prepend '' more than once (not exactly sure how this happens but Message-ID: <57203dc8.d3161c0a.eb71.1f06@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83954:1e6beaeb48bb Date: 2016-04-26 21:14 -0700 http://bitbucket.org/pypy/pypy/changeset/1e6beaeb48bb/ Log: don't prepend '' more than once (not exactly sure how this happens but it does) (grafted from c87392dd55febe92a4b704165ce97ebfc6df17b3) diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -188,7 +188,10 @@ # When translating PyPy, freeze the file name # /lastdirname/basename.py # instead of freezing the complete translation-time path. 
- filename = self.co_filename.lstrip('<').rstrip('>') + filename = self.co_filename + if filename.startswith(''): + return + filename = filename.lstrip('<').rstrip('>') if filename.lower().endswith('.pyc'): filename = filename[:-1] basename = os.path.basename(filename) From pypy.commits at gmail.com Wed Apr 27 00:19:22 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 21:19:22 -0700 (PDT) Subject: [pypy-commit] pypy py3k: adjust per PyCode's changes to frozen modules and app_name='__import__' Message-ID: <57203dca.455ec20a.aa6df.ffff8b56@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83955:d876ff0650f5 Date: 2016-04-26 21:16 -0700 http://bitbucket.org/pypy/pypy/changeset/d876ff0650f5/ Log: adjust per PyCode's changes to frozen modules and app_name='__import__' diff --git a/pypy/module/_frozen_importlib/interp_import.py b/pypy/module/_frozen_importlib/interp_import.py --- a/pypy/module/_frozen_importlib/interp_import.py +++ b/pypy/module/_frozen_importlib/interp_import.py @@ -1,12 +1,14 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import OperationError - at interp2app def import_with_frames_removed(space, __args__): try: return space.call_args( space.getbuiltinmodule('_frozen_importlib').getdictvalue( space, '__import__'), __args__) except OperationError as e: - e.remove_traceback_module_frames('') + e.remove_traceback_module_frames( + '/frozen importlib._bootstrap') raise +import_with_frames_removed = interp2app(import_with_frames_removed, + app_name='__import__') From pypy.commits at gmail.com Wed Apr 27 00:56:02 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 21:56:02 -0700 (PDT) Subject: [pypy-commit] pypy py3k: fix reraise in hidden functions Message-ID: <57204662.8bd31c0a.5f55a.21ba@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83957:b1242e337f6c Date: 2016-04-26 21:47 -0700 http://bitbucket.org/pypy/pypy/changeset/b1242e337f6c/ Log: fix reraise in hidden 
functions (grafted from c4d7b8fef89a557df8cc1c926471c0c4d04b9bb3) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -658,7 +658,7 @@ if nbargs > 2: raise BytecodeCorruption("bad RAISE_VARARGS oparg") if nbargs == 0: - last_operr = self._exc_info_unroll(space) + last_operr = self._exc_info_unroll(space, for_hidden=True) if last_operr is None: raise oefmt(space.w_RuntimeError, "No active exception to reraise") From pypy.commits at gmail.com Wed Apr 27 00:56:00 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 21:56:00 -0700 (PDT) Subject: [pypy-commit] pypy default: fix reraise in hidden functions Message-ID: <57204660.cb9a1c0a.725e5.6783@mx.google.com> Author: Philip Jenvey Branch: Changeset: r83956:c4d7b8fef89a Date: 2016-04-26 21:47 -0700 http://bitbucket.org/pypy/pypy/changeset/c4d7b8fef89a/ Log: fix reraise in hidden functions diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -742,7 +742,7 @@ def RAISE_VARARGS(self, nbargs, next_instr): space = self.space if nbargs == 0: - last_operr = self._exc_info_unroll(space) + last_operr = self._exc_info_unroll(space, for_hidden=True) if last_operr is None: raise oefmt(space.w_TypeError, "No active exception to reraise") diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -60,6 +60,7 @@ import __pypy__ import sys + result = [False] @__pypy__.hidden_applevel def test_hidden_with_tb(): def not_hidden(): 1/0 @@ -68,9 +69,11 @@ assert sys.exc_info() == (None, None, None) tb = __pypy__.get_hidden_tb() assert tb.tb_frame.f_code.co_name == 'not_hidden' - return True + result[0] = True + raise else: return False - assert test_hidden_with_tb() + raises(ZeroDivisionError, 
test_hidden_with_tb) + assert result[0] def test_lookup_special(self): from __pypy__ import lookup_special From pypy.commits at gmail.com Wed Apr 27 00:56:04 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 26 Apr 2016 21:56:04 -0700 (PDT) Subject: [pypy-commit] pypy py3k: readd for_hidden from default Message-ID: <57204664.ce9d1c0a.cb3dd.fffff79a@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r83958:402867a81fa5 Date: 2016-04-26 21:48 -0700 http://bitbucket.org/pypy/pypy/changeset/402867a81fa5/ Log: readd for_hidden from default diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized, check_nonneg from rpython.rlib.jit import hint -from rpython.rlib.objectmodel import instantiate, we_are_translated +from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint from rpython.tool.pairtype import extendabletype @@ -872,7 +872,8 @@ return space.w_False @jit.unroll_safe - def _exc_info_unroll(self, space): + @specialize.arg(2) + def _exc_info_unroll(self, space, for_hidden=False): """Return the most recent OperationError being handled in the call stack """ @@ -882,7 +883,7 @@ if last is not None: if last is get_cleared_operation_error(self.space): break - if not frame.hide(): + if for_hidden or not frame.hide(): return last frame = frame.f_backref() return None From pypy.commits at gmail.com Wed Apr 27 02:49:51 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Apr 2016 23:49:51 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: Dump logs about the bitstring compression Message-ID: <5720610f.26b0c20a.cb528.ffffba86@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83959:e341fe8db539 Date: 2016-04-27 08:49 +0200 http://bitbucket.org/pypy/pypy/changeset/e341fe8db539/ Log: Dump 
logs about the bitstring compression diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -445,6 +445,9 @@ # across multiple Descrs if they always give the same answer (in # PyPy, it reduces the length of the bitstrings from 4000+ to # 373). + from rpython.jit.codewriter.policy import log + + log("compute_bitstrings:") effectinfos = [] descrs = {'fields': set(), 'arrays': set(), 'interiorfields': set()} for descr in all_descrs: @@ -465,7 +468,11 @@ descrs[key].update(getattr(ei, '_write_descrs_' + key)) else: descr.ei_index = sys.maxint + log(" %d effectinfos:" % (len(effectinfos),)) + for key in sorted(descrs): + log(" %d descrs for %s" % (len(descrs[key]), key)) + seen = set() for key in descrs: all_sets = [] for descr in descrs[key]: @@ -503,3 +510,11 @@ bitstrw = bitstring.make_bitstring(bitstrw) setattr(ei, 'bitstring_readonly_descrs_' + key, bitstrr) setattr(ei, 'bitstring_write_descrs_' + key, bitstrw) + seen.add(bitstrr) + seen.add(bitstrw) + + if seen: + mean_length = float(sum(len(x) for x in seen)) / len(seen) + max_length = max(len(x) for x in seen) + log("-> %d bitstrings, mean length %.1f, max length %d" % ( + len(seen), mean_length, max_length)) From pypy.commits at gmail.com Wed Apr 27 03:17:34 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Apr 2016 00:17:34 -0700 (PDT) Subject: [pypy-commit] pypy bitstring: ready to merge Message-ID: <5720678e.4374c20a.52888.ffffc2d4@mx.google.com> Author: Armin Rigo Branch: bitstring Changeset: r83960:af6b5f003312 Date: 2016-04-27 09:09 +0200 http://bitbucket.org/pypy/pypy/changeset/af6b5f003312/ Log: ready to merge From pypy.commits at gmail.com Wed Apr 27 03:17:36 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Apr 2016 00:17:36 -0700 (PDT) Subject: [pypy-commit] pypy default: hg merge bitstring Message-ID: <57206790.91d31c0a.2a5a6.6f83@mx.google.com> Author: Armin 
Rigo Branch: Changeset: r83961:b985ddd2357b Date: 2016-04-27 09:16 +0200 http://bitbucket.org/pypy/pypy/changeset/b985ddd2357b/ Log: hg merge bitstring Use bitstrings to compress the lists of read or written descrs that we attach to EffectInfo. It is not necessarily more compact (if there are a lot of tiny lists), but it fixes two problems: * optimizeopt/heap.py: we no longer have the potential problem of having to walk some very large lists of descrs. Now we walk the descrs that are cached at this point, and check if each one is in the bitstring. * backendopt/writeanalyze.py: as a result we can get rid of the arbitrary limit on the size of the lists, which got in the way recently. diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4577,6 +4577,13 @@ with py.test.raises(AnnotatorError): a.build_types(f, [float]) + def test_Ellipsis_not_rpython(self): + def f(): + return Ellipsis + a = self.RPythonAnnotator() + e = py.test.raises(Exception, a.build_types, f, []) + assert str(e.value) == "Don't know how to represent Ellipsis" + def g(n): return [0, 1, 2, n] diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -479,6 +479,9 @@ all_descrs.append(v) return all_descrs + def fetch_all_descrs(self): + return self.descrs.values() + def calldescrof(self, FUNC, ARGS, RESULT, effect_info): key = ('call', getkind(RESULT), tuple([getkind(A) for A in ARGS]), diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -1,7 +1,9 @@ +import sys from rpython.jit.metainterp.typesystem import deref, fieldType, arrayItem from rpython.rtyper.rclass import OBJECT from rpython.rtyper.lltypesystem 
import lltype, llmemory from rpython.translator.backendopt.graphanalyze import BoolGraphAnalyzer +from rpython.tool.algo import bitstring class EffectInfo(object): @@ -110,12 +112,20 @@ can_invalidate=False, call_release_gil_target=_NO_CALL_RELEASE_GIL_TARGET, extradescrs=None): - key = (frozenset_or_none(readonly_descrs_fields), - frozenset_or_none(readonly_descrs_arrays), - frozenset_or_none(readonly_descrs_interiorfields), - frozenset_or_none(write_descrs_fields), - frozenset_or_none(write_descrs_arrays), - frozenset_or_none(write_descrs_interiorfields), + readonly_descrs_fields = frozenset_or_none(readonly_descrs_fields) + readonly_descrs_arrays = frozenset_or_none(readonly_descrs_arrays) + readonly_descrs_interiorfields = frozenset_or_none( + readonly_descrs_interiorfields) + write_descrs_fields = frozenset_or_none(write_descrs_fields) + write_descrs_arrays = frozenset_or_none(write_descrs_arrays) + write_descrs_interiorfields = frozenset_or_none( + write_descrs_interiorfields) + key = (readonly_descrs_fields, + readonly_descrs_arrays, + readonly_descrs_interiorfields, + write_descrs_fields, + write_descrs_arrays, + write_descrs_interiorfields, extraeffect, oopspecindex, can_invalidate) @@ -139,22 +149,34 @@ assert write_descrs_arrays is not None assert write_descrs_interiorfields is not None result = object.__new__(cls) - result.readonly_descrs_fields = readonly_descrs_fields - result.readonly_descrs_arrays = readonly_descrs_arrays - result.readonly_descrs_interiorfields = readonly_descrs_interiorfields + # the frozensets "._readonly_xxx" and "._write_xxx" should not be + # translated. 
+ result._readonly_descrs_fields = readonly_descrs_fields + result._readonly_descrs_arrays = readonly_descrs_arrays + result._readonly_descrs_interiorfields = readonly_descrs_interiorfields if extraeffect == EffectInfo.EF_LOOPINVARIANT or \ extraeffect == EffectInfo.EF_ELIDABLE_CANNOT_RAISE or \ extraeffect == EffectInfo.EF_ELIDABLE_OR_MEMORYERROR or \ extraeffect == EffectInfo.EF_ELIDABLE_CAN_RAISE: # Ignore the writes. Note that this ignores also writes with # no corresponding reads (rarely the case, but possible). - result.write_descrs_fields = [] - result.write_descrs_arrays = [] - result.write_descrs_interiorfields = [] + result._write_descrs_fields = frozenset() + result._write_descrs_arrays = frozenset() + result._write_descrs_interiorfields = frozenset() else: - result.write_descrs_fields = write_descrs_fields - result.write_descrs_arrays = write_descrs_arrays - result.write_descrs_interiorfields = write_descrs_interiorfields + result._write_descrs_fields = write_descrs_fields + result._write_descrs_arrays = write_descrs_arrays + result._write_descrs_interiorfields = write_descrs_interiorfields + # initialized later, in compute_bitstrings() + # (the goal of this is to make sure we don't build new EffectInfo + # instances after compute_bitstrings() is called) + result.bitstring_readonly_descrs_fields = Ellipsis + result.bitstring_readonly_descrs_arrays = Ellipsis + result.bitstring_readonly_descrs_interiorfields = Ellipsis + result.bitstring_write_descrs_fields = Ellipsis + result.bitstring_write_descrs_arrays = Ellipsis + result.bitstring_write_descrs_interiorfields = Ellipsis + # result.extraeffect = extraeffect result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex @@ -162,9 +184,38 @@ result.call_release_gil_target = call_release_gil_target if result.check_can_raise(ignore_memoryerror=True): assert oopspecindex in cls._OS_CANRAISE + + if (result._write_descrs_arrays is not None and + len(result._write_descrs_arrays) == 1): + # this is 
used only for ARRAYCOPY operations + [result.single_write_descr_array] = result._write_descrs_arrays + else: + result.single_write_descr_array = None + cls._cache[key] = result return result + def check_readonly_descr_field(self, fielddescr): + return bitstring.bitcheck(self.bitstring_readonly_descrs_fields, + fielddescr.ei_index) + def check_write_descr_field(self, fielddescr): + return bitstring.bitcheck(self.bitstring_write_descrs_fields, + fielddescr.ei_index) + def check_readonly_descr_array(self, arraydescr): + return bitstring.bitcheck(self.bitstring_readonly_descrs_arrays, + arraydescr.ei_index) + def check_write_descr_array(self, arraydescr): + return bitstring.bitcheck(self.bitstring_write_descrs_arrays, + arraydescr.ei_index) + def check_readonly_descr_interiorfield(self, interiorfielddescr): + # NOTE: this is not used so far + return bitstring.bitcheck(self.bitstring_readonly_descrs_interiorfields, + interiorfielddescr.ei_index) + def check_write_descr_interiorfield(self, interiorfielddescr): + # NOTE: this is not used so far + return bitstring.bitcheck(self.bitstring_write_descrs_interiorfields, + interiorfielddescr.ei_index) + def check_can_raise(self, ignore_memoryerror=False): if ignore_memoryerror: return self.extraeffect > self.EF_ELIDABLE_OR_MEMORYERROR @@ -382,3 +433,88 @@ assert funcptr return funcptr funcptr_for_oopspec._annspecialcase_ = 'specialize:arg(1)' + +# ____________________________________________________________ + +def compute_bitstrings(all_descrs): + # Compute the bitstrings in the EffectInfo, + # bitstring_{readonly,write}_descrs_{fieldd,arrays,interiordescrs}, + # and for each FieldDescrs and ArrayDescrs compute 'ei_index'. + # Each bit in the bitstrings says whether this Descr is present in + # this EffectInfo or not. We try to share the value of 'ei_index' + # across multiple Descrs if they always give the same answer (in + # PyPy, it reduces the length of the bitstrings from 4000+ to + # 373). 
+ from rpython.jit.codewriter.policy import log + + log("compute_bitstrings:") + effectinfos = [] + descrs = {'fields': set(), 'arrays': set(), 'interiorfields': set()} + for descr in all_descrs: + if hasattr(descr, 'get_extra_info'): + ei = descr.get_extra_info() + if ei is None: + continue + if ei._readonly_descrs_fields is None: + for key in descrs: + assert getattr(ei, '_readonly_descrs_' + key) is None + assert getattr(ei, '_write_descrs_' + key) is None + setattr(ei, 'bitstring_readonly_descrs_' + key, None) + setattr(ei, 'bitstring_write_descrs_' + key, None) + else: + effectinfos.append(ei) + for key in descrs: + descrs[key].update(getattr(ei, '_readonly_descrs_' + key)) + descrs[key].update(getattr(ei, '_write_descrs_' + key)) + else: + descr.ei_index = sys.maxint + log(" %d effectinfos:" % (len(effectinfos),)) + for key in sorted(descrs): + log(" %d descrs for %s" % (len(descrs[key]), key)) + + seen = set() + for key in descrs: + all_sets = [] + for descr in descrs[key]: + eisetr = [ei for ei in effectinfos + if descr in getattr(ei, '_readonly_descrs_' + key)] + eisetw = [ei for ei in effectinfos + if descr in getattr(ei, '_write_descrs_' + key)] + # these are the set of all ei such that this descr is in + # ei._readonly_descrs or ei._write_descrs + eisetr = frozenset(eisetr) + eisetw = frozenset(eisetw) + all_sets.append((descr, eisetr, eisetw)) + + # heuristic to reduce the total size of the bitstrings: start with + # numbering the descrs that are seen in many EffectInfos. If instead, + # by lack of chance, such a descr had a high number, then all these + # EffectInfos' bitstrings would need to store the same high number. 
+ def size_of_both_sets((d, r, w)): + return len(r) + len(w) + all_sets.sort(key=size_of_both_sets, reverse=True) + + mapping = {} + for (descr, eisetr, eisetw) in all_sets: + assert descr.ei_index == sys.maxint # not modified yet + descr.ei_index = mapping.setdefault((eisetr, eisetw), len(mapping)) + + for ei in effectinfos: + bitstrr = [descr.ei_index + for descr in getattr(ei, '_readonly_descrs_' + key)] + bitstrw = [descr.ei_index + for descr in getattr(ei, '_write_descrs_' + key)] + assert sys.maxint not in bitstrr + assert sys.maxint not in bitstrw + bitstrr = bitstring.make_bitstring(bitstrr) + bitstrw = bitstring.make_bitstring(bitstrw) + setattr(ei, 'bitstring_readonly_descrs_' + key, bitstrr) + setattr(ei, 'bitstring_write_descrs_' + key, bitstrw) + seen.add(bitstrr) + seen.add(bitstrw) + + if seen: + mean_length = float(sum(len(x) for x in seen)) / len(seen) + max_length = max(len(x) for x in seen) + log("-> %d bitstrings, mean length %.1f, max length %d" % ( + len(seen), mean_length, max_length)) diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -1,11 +1,12 @@ -import pytest +import pytest, sys from rpython.jit.codewriter.effectinfo import (effectinfo_from_writeanalyze, - EffectInfo, VirtualizableAnalyzer) + EffectInfo, VirtualizableAnalyzer, compute_bitstrings) from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.rclass import OBJECT from rpython.translator.translator import TranslationContext, graphof +from rpython.tool.algo.bitstring import bitcheck class FakeCPU(object): @@ -29,37 +30,39 @@ S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert list(effectinfo.readonly_descrs_fields) == [('fielddescr', S, "a")] - assert not 
effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert list(effectinfo._readonly_descrs_fields) == [('fielddescr', S, "a")] + assert not effectinfo._write_descrs_fields + assert not effectinfo._write_descrs_arrays + assert effectinfo.single_write_descr_array is None def test_include_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("struct", lltype.Ptr(S), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_arrays + assert list(effectinfo._write_descrs_fields) == [('fielddescr', S, "a")] + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_arrays def test_include_read_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A))]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert list(effectinfo.readonly_descrs_arrays) == [('arraydescr', A)] - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert list(effectinfo._readonly_descrs_arrays) == [('arraydescr', A)] + assert not effectinfo._write_descrs_fields + assert not effectinfo._write_descrs_arrays def test_include_write_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("array", lltype.Ptr(A))]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_fields + assert list(effectinfo._write_descrs_arrays) == [('arraydescr', A)] + assert effectinfo.single_write_descr_array == ('arraydescr', A) def 
test_dont_include_read_and_write_field(): @@ -67,9 +70,9 @@ effects = frozenset([("readstruct", lltype.Ptr(S), "a"), ("struct", lltype.Ptr(S), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert list(effectinfo._write_descrs_fields) == [('fielddescr', S, "a")] + assert not effectinfo._write_descrs_arrays def test_dont_include_read_and_write_array(): @@ -77,34 +80,34 @@ effects = frozenset([("readarray", lltype.Ptr(A)), ("array", lltype.Ptr(A))]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.readonly_descrs_arrays - assert not effectinfo.write_descrs_fields - assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._readonly_descrs_arrays + assert not effectinfo._write_descrs_fields + assert list(effectinfo._write_descrs_arrays) == [('arraydescr', A)] def test_filter_out_typeptr(): effects = frozenset([("struct", lltype.Ptr(OBJECT), "typeptr")]) effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_fields + assert not effectinfo._write_descrs_arrays def test_filter_out_array_of_void(): effects = frozenset([("array", lltype.Ptr(lltype.GcArray(lltype.Void)))]) effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_fields + assert 
not effectinfo._write_descrs_arrays def test_filter_out_struct_with_void(): effects = frozenset([("struct", lltype.Ptr(lltype.GcStruct("x", ("a", lltype.Void))), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_fields + assert not effectinfo._write_descrs_arrays class TestVirtualizableAnalyzer(object): @@ -138,3 +141,64 @@ res = self.analyze(entry, [int]) assert not res + + +def test_compute_bitstrings(): + class FDescr: + pass + class ADescr: + pass + class CDescr: + def __init__(self, ei): + self._ei = ei + def get_extra_info(self): + return self._ei + + f1descr = FDescr() + f2descr = FDescr() + f3descr = FDescr() + a1descr = ADescr() + a2descr = ADescr() + + ei1 = EffectInfo(None, None, None, None, None, None, + EffectInfo.EF_RANDOM_EFFECTS) + ei2 = EffectInfo([f1descr], [], [], [], [], []) + ei3 = EffectInfo([f1descr], [a1descr, a2descr], [], [f2descr], [], []) + + compute_bitstrings([CDescr(ei1), CDescr(ei2), CDescr(ei3), + f1descr, f2descr, f3descr, a1descr, a2descr]) + + assert f1descr.ei_index in (0, 1) + assert f2descr.ei_index == 1 - f1descr.ei_index + assert f3descr.ei_index == sys.maxint + assert a1descr.ei_index == 0 + assert a2descr.ei_index == 0 + + assert ei1.bitstring_readonly_descrs_fields is None + assert ei1.bitstring_readonly_descrs_arrays is None + assert ei1.bitstring_write_descrs_fields is None + + def expand(bitstr): + return [n for n in range(10) if bitcheck(bitstr, n)] + + assert expand(ei2.bitstring_readonly_descrs_fields) == [f1descr.ei_index] + assert expand(ei2.bitstring_write_descrs_fields) == [] + assert expand(ei2.bitstring_readonly_descrs_arrays) == [] + assert expand(ei2.bitstring_write_descrs_arrays) == [] + + assert expand(ei3.bitstring_readonly_descrs_fields) == [f1descr.ei_index] + assert 
expand(ei3.bitstring_write_descrs_fields) == [f2descr.ei_index] + assert expand(ei3.bitstring_readonly_descrs_arrays) == [0] #a1descr,a2descr + assert expand(ei3.bitstring_write_descrs_arrays) == [] + + for ei in [ei2, ei3]: + for fdescr in [f1descr, f2descr]: + assert ei.check_readonly_descr_field(fdescr) == ( + fdescr in ei._readonly_descrs_fields) + assert ei.check_write_descr_field(fdescr) == ( + fdescr in ei._write_descrs_fields) + for adescr in [a1descr, a2descr]: + assert ei.check_readonly_descr_array(adescr) == ( + adescr in ei._readonly_descrs_arrays) + assert ei.check_write_descr_array(adescr) == ( + adescr in ei._write_descrs_arrays) diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -209,7 +209,7 @@ isinstance(argboxes[3], ConstInt) and isinstance(argboxes[4], ConstInt) and isinstance(argboxes[5], ConstInt) and - len(descr.get_extra_info().write_descrs_arrays) == 1): + descr.get_extra_info().single_write_descr_array is not None): # ARRAYCOPY with constant starts and constant length doesn't escape # its argument # XXX really? 
@@ -299,9 +299,9 @@ isinstance(argboxes[3], ConstInt) and isinstance(argboxes[4], ConstInt) and isinstance(argboxes[5], ConstInt) and - len(effectinfo.write_descrs_arrays) == 1 + effectinfo.single_write_descr_array is not None ): - descr = effectinfo.write_descrs_arrays[0] + descr = effectinfo.single_write_descr_array cache = self.heap_array_cache.get(descr, None) srcstart = argboxes[3].getint() dststart = argboxes[4].getint() @@ -328,10 +328,10 @@ idx_cache._clear_cache_on_write(seen_allocation_of_target) return elif ( - len(effectinfo.write_descrs_arrays) == 1 + effectinfo.single_write_descr_array is not None ): # Fish the descr out of the effectinfo - cache = self.heap_array_cache.get(effectinfo.write_descrs_arrays[0], None) + cache = self.heap_array_cache.get(effectinfo.single_write_descr_array, None) if cache is not None: for idx, cache in cache.iteritems(): cache._clear_cache_on_write(seen_allocation_of_target) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -1,3 +1,4 @@ +import sys from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.objectmodel import we_are_translated, Symbolic @@ -87,9 +88,10 @@ class AbstractDescr(AbstractValue): - __slots__ = ('descr_index',) + __slots__ = ('descr_index', 'ei_index') llopaque = True descr_index = -1 + ei_index = sys.maxint def repr_of_descr(self): return '%r' % (self,) diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -432,28 +432,35 @@ optimize_GUARD_EXCEPTION = optimize_GUARD_NO_EXCEPTION def force_from_effectinfo(self, effectinfo): - # XXX we can get the wrong complexity here, if the lists - # XXX stored on effectinfo are large - for fielddescr in 
effectinfo.readonly_descrs_fields: - self.force_lazy_set(fielddescr) - for arraydescr in effectinfo.readonly_descrs_arrays: - self.force_lazy_setarrayitem(arraydescr) - for fielddescr in effectinfo.write_descrs_fields: - if fielddescr.is_always_pure(): - continue - try: - del self.cached_dict_reads[fielddescr] - except KeyError: - pass - self.force_lazy_set(fielddescr, can_cache=False) - for arraydescr in effectinfo.write_descrs_arrays: - self.force_lazy_setarrayitem(arraydescr, can_cache=False) - if arraydescr in self.corresponding_array_descrs: - dictdescr = self.corresponding_array_descrs.pop(arraydescr) + # Note: this version of the code handles effectively + # effectinfos that store arbitrarily many descrs, by looping + # on self.cached_{fields, arrayitems} and looking them up in + # the bitstrings stored in the effectinfo. + for fielddescr, cf in self.cached_fields.items(): + if effectinfo.check_readonly_descr_field(fielddescr): + cf.force_lazy_set(self, fielddescr) + if effectinfo.check_write_descr_field(fielddescr): + if fielddescr.is_always_pure(): + continue + try: + del self.cached_dict_reads[fielddescr] + except KeyError: + pass + cf.force_lazy_set(self, fielddescr, can_cache=False) + # + for arraydescr, submap in self.cached_arrayitems.items(): + if effectinfo.check_readonly_descr_array(arraydescr): + self.force_lazy_setarrayitem_submap(submap) + if effectinfo.check_write_descr_array(arraydescr): + self.force_lazy_setarrayitem_submap(submap, can_cache=False) + # + for arraydescr, dictdescr in self.corresponding_array_descrs.items(): + if effectinfo.check_write_descr_array(arraydescr): try: del self.cached_dict_reads[dictdescr] except KeyError: pass # someone did it already + # if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info self.force_lazy_set(vrefinfo.descr_forced) @@ -476,6 +483,10 @@ if indexb is None or indexb.contains(idx): cf.force_lazy_set(self, None, can_cache) + def 
force_lazy_setarrayitem_submap(self, submap, can_cache=True): + for cf in submap.itervalues(): + cf.force_lazy_set(self, None, can_cache) + def force_all_lazy_sets(self): items = self.cached_fields.items() if not we_are_translated(): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -620,10 +620,10 @@ and length and ((dest_info and dest_info.is_virtual()) or length.getint() <= 8) and ((source_info and source_info.is_virtual()) or length.getint() <= 8) - and len(extrainfo.write_descrs_arrays) == 1): # <-sanity check + and extrainfo.single_write_descr_array is not None): #<-sanity check source_start = source_start_box.getint() dest_start = dest_start_box.getint() - arraydescr = extrainfo.write_descrs_arrays[0] + arraydescr = extrainfo.single_write_descr_array if arraydescr.is_array_of_structs(): return False # not supported right now diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -10,7 +10,7 @@ from rpython.jit.metainterp.history import (TreeLoop, AbstractDescr, JitCellToken, TargetToken) from rpython.jit.metainterp.optimizeopt.util import sort_descrs, equaloplists -from rpython.jit.codewriter.effectinfo import EffectInfo +from rpython.jit.codewriter.effectinfo import EffectInfo, compute_bitstrings from rpython.jit.metainterp.logger import LogOperations from rpython.jit.tool.oparser import OpParser, pure_parse, convert_loop_to_trace from rpython.jit.metainterp.quasiimmut import QuasiImmutDescr @@ -530,6 +530,7 @@ metainterp_sd.virtualref_info = self.vrefinfo if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection + compute_bitstrings(self.cpu.fetch_all_descrs()) # 
compile_data.enable_opts = self.enable_opts state = optimize_trace(metainterp_sd, None, compile_data) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1838,7 +1838,11 @@ self.cpu.propagate_exception_descr = exc_descr # self.globaldata = MetaInterpGlobalData(self) + + def finish_setup_descrs(self): + from rpython.jit.codewriter import effectinfo self.all_descrs = self.cpu.setup_descrs() + effectinfo.compute_bitstrings(self.all_descrs) def _setup_once(self): """Runtime setup needed by the various components of the JIT.""" diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -132,6 +132,7 @@ metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) stats.metainterp_sd = metainterp_sd metainterp_sd.finish_setup(cw) + metainterp_sd.finish_setup_descrs() [jitdriver_sd] = metainterp_sd.jitdrivers_sd metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -27,8 +27,12 @@ def __init__(self, extraeffect, oopspecindex, write_descrs_fields, write_descrs_arrays): self.extraeffect = extraeffect self.oopspecindex = oopspecindex - self.write_descrs_fields = write_descrs_fields - self.write_descrs_arrays = write_descrs_arrays + self._write_descrs_fields = write_descrs_fields + self._write_descrs_arrays = write_descrs_arrays + if len(write_descrs_arrays) == 1: + [self.single_write_descr_array] = write_descrs_arrays + else: + self.single_write_descr_array = None def has_random_effects(self): return self.extraeffect == self.EF_RANDOM_EFFECTS @@ -37,14 +41,14 @@ def __init__(self, extraeffect, oopspecindex=None, 
write_descrs_fields=[], write_descrs_arrays=[]): self.extraeffect = extraeffect self.oopspecindex = oopspecindex - self.write_descrs_fields = write_descrs_fields - self.write_descrs_arrays = write_descrs_arrays + self.__write_descrs_fields = write_descrs_fields + self.__write_descrs_arrays = write_descrs_arrays def get_extra_info(self): return FakeEffectinfo( self.extraeffect, self.oopspecindex, - write_descrs_fields=self.write_descrs_fields, - write_descrs_arrays=self.write_descrs_arrays, + write_descrs_fields=self.__write_descrs_fields, + write_descrs_arrays=self.__write_descrs_arrays, ) arraycopydescr1 = FakeCallDescr(FakeEffectinfo.EF_CANNOT_RAISE, FakeEffectinfo.OS_ARRAYCOPY, write_descrs_arrays=[descr1]) diff --git a/rpython/jit/metainterp/test/test_warmspot.py b/rpython/jit/metainterp/test/test_warmspot.py --- a/rpython/jit/metainterp/test/test_warmspot.py +++ b/rpython/jit/metainterp/test/test_warmspot.py @@ -624,7 +624,7 @@ pass def setup_descrs(self): - pass + return [] def get_latest_descr(self, deadframe): assert isinstance(deadframe, FakeDeadFrame) diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -277,6 +277,7 @@ for vinfo in vinfos: if vinfo is not None: vinfo.finish() + self.metainterp_sd.finish_setup_descrs() if self.cpu.translate_support_code: self.annhelper.finish() diff --git a/rpython/tool/algo/bitstring.py b/rpython/tool/algo/bitstring.py new file mode 100644 --- /dev/null +++ b/rpython/tool/algo/bitstring.py @@ -0,0 +1,23 @@ + + +def make_bitstring(lst): + "NOT_RPYTHON" + if not lst: + return '' + num_bits = max(lst) + 1 + num_bytes = (num_bits + 7) // 8 + entries = [0] * num_bytes + for x in lst: + assert x >= 0 + entries[x >> 3] |= 1 << (x & 7) + return ''.join(map(chr, entries)) + +def bitcheck(bitstring, n): + assert n >= 0 + byte_number = n >> 3 + if byte_number >= len(bitstring): + return False + return 
(ord(bitstring[byte_number]) & (1 << (n & 7))) != 0 + +def num_bits(bitstring): + return len(bitstring) << 3 diff --git a/rpython/tool/algo/test/test_bitstring.py b/rpython/tool/algo/test/test_bitstring.py new file mode 100644 --- /dev/null +++ b/rpython/tool/algo/test/test_bitstring.py @@ -0,0 +1,25 @@ +from rpython.tool.algo.bitstring import * +from hypothesis import given, strategies + +def test_make(): + assert make_bitstring([]) == '' + assert make_bitstring([0]) == '\x01' + assert make_bitstring([7]) == '\x80' + assert make_bitstring([8]) == '\x00\x01' + assert make_bitstring([2, 4, 20]) == '\x14\x00\x10' + +def test_bitcheck(): + assert bitcheck('\x01', 0) is True + assert bitcheck('\x01', 1) is False + assert bitcheck('\x01', 10) is False + assert [n for n in range(32) if bitcheck('\x14\x00\x10', n)] == [2, 4, 20] + + at given(strategies.lists(strategies.integers(min_value=0, max_value=299))) +def test_random(lst): + bitstring = make_bitstring(lst) + assert set([n for n in range(300) if bitcheck(bitstring, n)]) == set(lst) + +def test_num_bits(): + assert num_bits('') == 0 + assert num_bits('a') == 8 + assert num_bits('bcd') == 24 diff --git a/rpython/translator/backendopt/test/test_writeanalyze.py b/rpython/translator/backendopt/test/test_writeanalyze.py --- a/rpython/translator/backendopt/test/test_writeanalyze.py +++ b/rpython/translator/backendopt/test/test_writeanalyze.py @@ -1,3 +1,4 @@ +import py from rpython.rtyper.lltypesystem import lltype from rpython.translator.translator import TranslationContext, graphof from rpython.translator.backendopt.writeanalyze import WriteAnalyzer, top_set @@ -314,6 +315,7 @@ assert T1 == T2 def test_cutoff(self): + py.test.skip("cutoff: disabled") from rpython.rlib.unroll import unrolling_iterable cutoff = 20 attrs = unrolling_iterable(["s%s" % i for i in range(cutoff + 5)]) diff --git a/rpython/translator/backendopt/writeanalyze.py b/rpython/translator/backendopt/writeanalyze.py --- 
a/rpython/translator/backendopt/writeanalyze.py +++ b/rpython/translator/backendopt/writeanalyze.py @@ -4,10 +4,14 @@ top_set = object() empty_set = frozenset() -CUTOFF = 3000 +# CUTOFF is disabled, as it gave a strangely not-working-any-more effect +# if the size of the result grows past that bound. The main user was +# optimizeopt/heap.py (force_from_effectinfo), which has been rewritten +# to be happy with any size now. +#CUTOFF = 3000 class WriteAnalyzer(graphanalyze.GraphAnalyzer): - cutoff = CUTOFF + #cutoff = CUTOFF def bottom_result(self): return empty_set @@ -25,8 +29,8 @@ if other is top_set: return top_set result.update(other) - if len(result) > self.cutoff: - return top_set + #if len(result) > self.cutoff: + # return top_set return result def finalize_builder(self, result): From pypy.commits at gmail.com Wed Apr 27 03:17:38 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Apr 2016 00:17:38 -0700 (PDT) Subject: [pypy-commit] pypy default: document branch Message-ID: <57206792.85661c0a.873b8.6d48@mx.google.com> Author: Armin Rigo Branch: Changeset: r83962:fc81b95f1837 Date: 2016-04-27 09:17 +0200 http://bitbucket.org/pypy/pypy/changeset/fc81b95f1837/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -16,3 +16,9 @@ Remove a number of options from the build process that were never tested and never set. Fix a performance bug in the method cache. + +.. branch: bitstring + +JIT: use bitstrings to compress the lists of read or written descrs +that we attach to EffectInfo. Fixes a problem we had in +remove-objspace-options. 
From pypy.commits at gmail.com Wed Apr 27 03:47:48 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Apr 2016 00:47:48 -0700 (PDT) Subject: [pypy-commit] pypy default: uh Message-ID: <57206ea4.171d1c0a.e23ea.08b4@mx.google.com> Author: Armin Rigo Branch: Changeset: r83963:65b5c6e301a2 Date: 2016-04-27 09:47 +0200 http://bitbucket.org/pypy/pypy/changeset/65b5c6e301a2/ Log: uh diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -32,7 +32,7 @@ reference in list at position i will be leaked. """ assert isinstance(w_list, W_ListObject) - assert 0 <= index < w_list.length + assert 0 <= index < w_list.length() # Deliberately leak, so that it can be safely decref'd. make_ref(space, w_list.getitem(index)) Py_DecRef(space, w_item) From pypy.commits at gmail.com Wed Apr 27 04:31:56 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Apr 2016 01:31:56 -0700 (PDT) Subject: [pypy-commit] pypy default: Document the AttributeError/TypeError difference with CPython about Message-ID: <572078fc.d1981c0a.4053e.391f@mx.google.com> Author: Armin Rigo Branch: Changeset: r83964:434d21e15d0d Date: 2016-04-27 10:32 +0200 http://bitbucket.org/pypy/pypy/changeset/434d21e15d0d/ Log: Document the AttributeError/TypeError difference with CPython about built-in types diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -387,6 +387,14 @@ wrappers. On PyPy we can't tell the difference, so ``ismethod([].__add__) == ismethod(list.__add__) == True``. +* in CPython, the built-in types have attributes that can be + implemented in various ways. Depending on the way, if you try to + write to (or delete) a read-only (or undeletable) attribute, you get + either a ``TypeError`` or an ``AttributeError``. 
PyPy tries to + strike some middle ground between full consistency and full + compatibility here. This means that a few corner cases don't raise + the same exception, like ``del (lambda:None).__closure__``. + * in pure Python, if you write ``class A(object): def f(self): pass`` and have a subclass ``B`` which doesn't override ``f()``, then ``B.f(x)`` still checks that ``x`` is an instance of ``B``. In From pypy.commits at gmail.com Wed Apr 27 04:56:27 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 01:56:27 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: merge default into branch Message-ID: <57207ebb.6322c20a.0c9b.ffffe894@mx.google.com> Author: Matti Picus Branch: cpyext-for-merge Changeset: r83967:05bf6936b917 Date: 2016-04-27 11:08 +0300 http://bitbucket.org/pypy/pypy/changeset/05bf6936b917/ Log: merge default into branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -16,3 +16,9 @@ Remove a number of options from the build process that were never tested and never set. Fix a performance bug in the method cache. + +.. branch: bitstring + +JIT: use bitstrings to compress the lists of read or written descrs +that we attach to EffectInfo. Fixes a problem we had in +remove-objspace-options. diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -162,7 +162,10 @@ # When translating PyPy, freeze the file name # /lastdirname/basename.py # instead of freezing the complete translation-time path. 
- filename = self.co_filename.lstrip('<').rstrip('>') + filename = self.co_filename + if filename.startswith(''): + return + filename = filename.lstrip('<').rstrip('>') if filename.lower().endswith('.pyc'): filename = filename[:-1] basename = os.path.basename(filename) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -742,7 +742,7 @@ def RAISE_VARARGS(self, nbargs, next_instr): space = self.space if nbargs == 0: - last_operr = self._exc_info_unroll(space) + last_operr = self._exc_info_unroll(space, for_hidden=True) if last_operr is None: raise oefmt(space.w_TypeError, "No active exception to reraise") diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -60,6 +60,7 @@ import __pypy__ import sys + result = [False] @__pypy__.hidden_applevel def test_hidden_with_tb(): def not_hidden(): 1/0 @@ -68,9 +69,11 @@ assert sys.exc_info() == (None, None, None) tb = __pypy__.get_hidden_tb() assert tb.tb_frame.f_code.co_name == 'not_hidden' - return True + result[0] = True + raise else: return False - assert test_hidden_with_tb() + raises(ZeroDivisionError, test_hidden_with_tb) + assert result[0] def test_lookup_special(self): from __pypy__ import lookup_special diff --git a/pypy/module/cpyext/include/listobject.h b/pypy/module/cpyext/include/listobject.h --- a/pypy/module/cpyext/include/listobject.h +++ b/pypy/module/cpyext/include/listobject.h @@ -1,2 +1,1 @@ #define PyList_GET_ITEM PyList_GetItem -#define PyList_SET_ITEM PyList_SetItem diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -3,7 +3,7 @@ from pypy.module.cpyext.api import (cpython_api, CANNOT_FAIL, Py_ssize_t, build_type_checkers) from pypy.module.cpyext.pyerrors 
import PyErr_BadInternalCall -from pypy.module.cpyext.pyobject import Py_DecRef, PyObject +from pypy.module.cpyext.pyobject import Py_DecRef, PyObject, make_ref from pypy.objspace.std.listobject import W_ListObject from pypy.interpreter.error import OperationError @@ -21,6 +21,25 @@ """ return space.newlist([None] * len) + at cpython_api([PyObject, Py_ssize_t, PyObject], PyObject, error=CANNOT_FAIL, + result_borrowed=True) +def PyList_SET_ITEM(space, w_list, index, w_item): + """Macro form of PyList_SetItem() without error checking. This is normally + only used to fill in new lists where there is no previous content. + + This function "steals" a reference to item, and, unlike PyList_SetItem(), + does not discard a reference to any item that it being replaced; any + reference in list at position i will be leaked. + """ + assert isinstance(w_list, W_ListObject) + assert 0 <= index < w_list.length() + # Deliberately leak, so that it can be safely decref'd. + make_ref(space, w_list.getitem(index)) + Py_DecRef(space, w_item) + w_list.setitem(index, w_item) + return w_item + + @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) def PyList_SetItem(space, w_list, index, w_item): """Set the item at index index in list to item. 
Return 0 on success diff --git a/pypy/module/cpyext/test/test_listobject.py b/pypy/module/cpyext/test/test_listobject.py --- a/pypy/module/cpyext/test/test_listobject.py +++ b/pypy/module/cpyext/test/test_listobject.py @@ -136,3 +136,45 @@ l = [1, 2, 3] module.setlistitem(l,0) assert l == [None, 2, 3] + + def test_get_item_macro(self): + module = self.import_extension('foo', [ + ("test_get_item", "METH_NOARGS", + """ + PyObject* o = PyList_New(1); + + PyObject* o2 = PyInt_FromLong(0); + PyList_SET_ITEM(o, 0, o2); + o2 = NULL; + + PyObject* o3 = PyList_GET_ITEM(o, 0); + Py_INCREF(o3); + Py_CLEAR(o); + return o3; + """)]) + assert module.test_get_item() == 0 + + def test_set_item_macro(self): + """PyList_SET_ITEM leaks a reference to the target.""" + module = self.import_extension('foo', [ + ("test_refcount_diff_after_setitem", "METH_NOARGS", + """ + PyObject* o = PyList_New(0); + PyObject* o2 = PyList_New(0); + + PyList_Append(o, o2); // does not steal o2 + + Py_ssize_t refcount = Py_REFCNT(o2); + + // Steal a reference to o2, but leak the old reference to o2. + // The net result should be no change in refcount. + PyList_SET_ITEM(o, 0, o2); + + Py_ssize_t new_refcount = Py_REFCNT(o2); + + Py_CLEAR(o); + Py_DECREF(o2); // append incref'd. + // Py_CLEAR(o2); // naive implementation would fail here. 
+ return PyLong_FromSsize_t(new_refcount - refcount); + """)]) + assert module.test_refcount_diff_after_setitem() == 0 diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4577,6 +4577,13 @@ with py.test.raises(AnnotatorError): a.build_types(f, [float]) + def test_Ellipsis_not_rpython(self): + def f(): + return Ellipsis + a = self.RPythonAnnotator() + e = py.test.raises(Exception, a.build_types, f, []) + assert str(e.value) == "Don't know how to represent Ellipsis" + def g(n): return [0, 1, 2, n] diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -479,6 +479,9 @@ all_descrs.append(v) return all_descrs + def fetch_all_descrs(self): + return self.descrs.values() + def calldescrof(self, FUNC, ARGS, RESULT, effect_info): key = ('call', getkind(RESULT), tuple([getkind(A) for A in ARGS]), diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -1,7 +1,9 @@ +import sys from rpython.jit.metainterp.typesystem import deref, fieldType, arrayItem from rpython.rtyper.rclass import OBJECT from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.translator.backendopt.graphanalyze import BoolGraphAnalyzer +from rpython.tool.algo import bitstring class EffectInfo(object): @@ -110,12 +112,20 @@ can_invalidate=False, call_release_gil_target=_NO_CALL_RELEASE_GIL_TARGET, extradescrs=None): - key = (frozenset_or_none(readonly_descrs_fields), - frozenset_or_none(readonly_descrs_arrays), - frozenset_or_none(readonly_descrs_interiorfields), - frozenset_or_none(write_descrs_fields), - frozenset_or_none(write_descrs_arrays), - frozenset_or_none(write_descrs_interiorfields), + 
readonly_descrs_fields = frozenset_or_none(readonly_descrs_fields) + readonly_descrs_arrays = frozenset_or_none(readonly_descrs_arrays) + readonly_descrs_interiorfields = frozenset_or_none( + readonly_descrs_interiorfields) + write_descrs_fields = frozenset_or_none(write_descrs_fields) + write_descrs_arrays = frozenset_or_none(write_descrs_arrays) + write_descrs_interiorfields = frozenset_or_none( + write_descrs_interiorfields) + key = (readonly_descrs_fields, + readonly_descrs_arrays, + readonly_descrs_interiorfields, + write_descrs_fields, + write_descrs_arrays, + write_descrs_interiorfields, extraeffect, oopspecindex, can_invalidate) @@ -139,22 +149,34 @@ assert write_descrs_arrays is not None assert write_descrs_interiorfields is not None result = object.__new__(cls) - result.readonly_descrs_fields = readonly_descrs_fields - result.readonly_descrs_arrays = readonly_descrs_arrays - result.readonly_descrs_interiorfields = readonly_descrs_interiorfields + # the frozensets "._readonly_xxx" and "._write_xxx" should not be + # translated. + result._readonly_descrs_fields = readonly_descrs_fields + result._readonly_descrs_arrays = readonly_descrs_arrays + result._readonly_descrs_interiorfields = readonly_descrs_interiorfields if extraeffect == EffectInfo.EF_LOOPINVARIANT or \ extraeffect == EffectInfo.EF_ELIDABLE_CANNOT_RAISE or \ extraeffect == EffectInfo.EF_ELIDABLE_OR_MEMORYERROR or \ extraeffect == EffectInfo.EF_ELIDABLE_CAN_RAISE: # Ignore the writes. Note that this ignores also writes with # no corresponding reads (rarely the case, but possible). 
- result.write_descrs_fields = [] - result.write_descrs_arrays = [] - result.write_descrs_interiorfields = [] + result._write_descrs_fields = frozenset() + result._write_descrs_arrays = frozenset() + result._write_descrs_interiorfields = frozenset() else: - result.write_descrs_fields = write_descrs_fields - result.write_descrs_arrays = write_descrs_arrays - result.write_descrs_interiorfields = write_descrs_interiorfields + result._write_descrs_fields = write_descrs_fields + result._write_descrs_arrays = write_descrs_arrays + result._write_descrs_interiorfields = write_descrs_interiorfields + # initialized later, in compute_bitstrings() + # (the goal of this is to make sure we don't build new EffectInfo + # instances after compute_bitstrings() is called) + result.bitstring_readonly_descrs_fields = Ellipsis + result.bitstring_readonly_descrs_arrays = Ellipsis + result.bitstring_readonly_descrs_interiorfields = Ellipsis + result.bitstring_write_descrs_fields = Ellipsis + result.bitstring_write_descrs_arrays = Ellipsis + result.bitstring_write_descrs_interiorfields = Ellipsis + # result.extraeffect = extraeffect result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex @@ -162,9 +184,38 @@ result.call_release_gil_target = call_release_gil_target if result.check_can_raise(ignore_memoryerror=True): assert oopspecindex in cls._OS_CANRAISE + + if (result._write_descrs_arrays is not None and + len(result._write_descrs_arrays) == 1): + # this is used only for ARRAYCOPY operations + [result.single_write_descr_array] = result._write_descrs_arrays + else: + result.single_write_descr_array = None + cls._cache[key] = result return result + def check_readonly_descr_field(self, fielddescr): + return bitstring.bitcheck(self.bitstring_readonly_descrs_fields, + fielddescr.ei_index) + def check_write_descr_field(self, fielddescr): + return bitstring.bitcheck(self.bitstring_write_descrs_fields, + fielddescr.ei_index) + def check_readonly_descr_array(self, arraydescr): + 
return bitstring.bitcheck(self.bitstring_readonly_descrs_arrays, + arraydescr.ei_index) + def check_write_descr_array(self, arraydescr): + return bitstring.bitcheck(self.bitstring_write_descrs_arrays, + arraydescr.ei_index) + def check_readonly_descr_interiorfield(self, interiorfielddescr): + # NOTE: this is not used so far + return bitstring.bitcheck(self.bitstring_readonly_descrs_interiorfields, + interiorfielddescr.ei_index) + def check_write_descr_interiorfield(self, interiorfielddescr): + # NOTE: this is not used so far + return bitstring.bitcheck(self.bitstring_write_descrs_interiorfields, + interiorfielddescr.ei_index) + def check_can_raise(self, ignore_memoryerror=False): if ignore_memoryerror: return self.extraeffect > self.EF_ELIDABLE_OR_MEMORYERROR @@ -382,3 +433,88 @@ assert funcptr return funcptr funcptr_for_oopspec._annspecialcase_ = 'specialize:arg(1)' + +# ____________________________________________________________ + +def compute_bitstrings(all_descrs): + # Compute the bitstrings in the EffectInfo, + # bitstring_{readonly,write}_descrs_{fieldd,arrays,interiordescrs}, + # and for each FieldDescrs and ArrayDescrs compute 'ei_index'. + # Each bit in the bitstrings says whether this Descr is present in + # this EffectInfo or not. We try to share the value of 'ei_index' + # across multiple Descrs if they always give the same answer (in + # PyPy, it reduces the length of the bitstrings from 4000+ to + # 373). 
+ from rpython.jit.codewriter.policy import log + + log("compute_bitstrings:") + effectinfos = [] + descrs = {'fields': set(), 'arrays': set(), 'interiorfields': set()} + for descr in all_descrs: + if hasattr(descr, 'get_extra_info'): + ei = descr.get_extra_info() + if ei is None: + continue + if ei._readonly_descrs_fields is None: + for key in descrs: + assert getattr(ei, '_readonly_descrs_' + key) is None + assert getattr(ei, '_write_descrs_' + key) is None + setattr(ei, 'bitstring_readonly_descrs_' + key, None) + setattr(ei, 'bitstring_write_descrs_' + key, None) + else: + effectinfos.append(ei) + for key in descrs: + descrs[key].update(getattr(ei, '_readonly_descrs_' + key)) + descrs[key].update(getattr(ei, '_write_descrs_' + key)) + else: + descr.ei_index = sys.maxint + log(" %d effectinfos:" % (len(effectinfos),)) + for key in sorted(descrs): + log(" %d descrs for %s" % (len(descrs[key]), key)) + + seen = set() + for key in descrs: + all_sets = [] + for descr in descrs[key]: + eisetr = [ei for ei in effectinfos + if descr in getattr(ei, '_readonly_descrs_' + key)] + eisetw = [ei for ei in effectinfos + if descr in getattr(ei, '_write_descrs_' + key)] + # these are the set of all ei such that this descr is in + # ei._readonly_descrs or ei._write_descrs + eisetr = frozenset(eisetr) + eisetw = frozenset(eisetw) + all_sets.append((descr, eisetr, eisetw)) + + # heuristic to reduce the total size of the bitstrings: start with + # numbering the descrs that are seen in many EffectInfos. If instead, + # by lack of chance, such a descr had a high number, then all these + # EffectInfos' bitstrings would need to store the same high number. 
+ def size_of_both_sets((d, r, w)): + return len(r) + len(w) + all_sets.sort(key=size_of_both_sets, reverse=True) + + mapping = {} + for (descr, eisetr, eisetw) in all_sets: + assert descr.ei_index == sys.maxint # not modified yet + descr.ei_index = mapping.setdefault((eisetr, eisetw), len(mapping)) + + for ei in effectinfos: + bitstrr = [descr.ei_index + for descr in getattr(ei, '_readonly_descrs_' + key)] + bitstrw = [descr.ei_index + for descr in getattr(ei, '_write_descrs_' + key)] + assert sys.maxint not in bitstrr + assert sys.maxint not in bitstrw + bitstrr = bitstring.make_bitstring(bitstrr) + bitstrw = bitstring.make_bitstring(bitstrw) + setattr(ei, 'bitstring_readonly_descrs_' + key, bitstrr) + setattr(ei, 'bitstring_write_descrs_' + key, bitstrw) + seen.add(bitstrr) + seen.add(bitstrw) + + if seen: + mean_length = float(sum(len(x) for x in seen)) / len(seen) + max_length = max(len(x) for x in seen) + log("-> %d bitstrings, mean length %.1f, max length %d" % ( + len(seen), mean_length, max_length)) diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -1,11 +1,12 @@ -import pytest +import pytest, sys from rpython.jit.codewriter.effectinfo import (effectinfo_from_writeanalyze, - EffectInfo, VirtualizableAnalyzer) + EffectInfo, VirtualizableAnalyzer, compute_bitstrings) from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.rclass import OBJECT from rpython.translator.translator import TranslationContext, graphof +from rpython.tool.algo.bitstring import bitcheck class FakeCPU(object): @@ -29,37 +30,39 @@ S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert list(effectinfo.readonly_descrs_fields) == [('fielddescr', S, "a")] - assert not 
effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert list(effectinfo._readonly_descrs_fields) == [('fielddescr', S, "a")] + assert not effectinfo._write_descrs_fields + assert not effectinfo._write_descrs_arrays + assert effectinfo.single_write_descr_array is None def test_include_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("struct", lltype.Ptr(S), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_arrays + assert list(effectinfo._write_descrs_fields) == [('fielddescr', S, "a")] + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_arrays def test_include_read_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A))]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert list(effectinfo.readonly_descrs_arrays) == [('arraydescr', A)] - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert list(effectinfo._readonly_descrs_arrays) == [('arraydescr', A)] + assert not effectinfo._write_descrs_fields + assert not effectinfo._write_descrs_arrays def test_include_write_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("array", lltype.Ptr(A))]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_fields + assert list(effectinfo._write_descrs_arrays) == [('arraydescr', A)] + assert effectinfo.single_write_descr_array == ('arraydescr', A) def 
test_dont_include_read_and_write_field(): @@ -67,9 +70,9 @@ effects = frozenset([("readstruct", lltype.Ptr(S), "a"), ("struct", lltype.Ptr(S), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert list(effectinfo._write_descrs_fields) == [('fielddescr', S, "a")] + assert not effectinfo._write_descrs_arrays def test_dont_include_read_and_write_array(): @@ -77,34 +80,34 @@ effects = frozenset([("readarray", lltype.Ptr(A)), ("array", lltype.Ptr(A))]) effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU()) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.readonly_descrs_arrays - assert not effectinfo.write_descrs_fields - assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._readonly_descrs_arrays + assert not effectinfo._write_descrs_fields + assert list(effectinfo._write_descrs_arrays) == [('arraydescr', A)] def test_filter_out_typeptr(): effects = frozenset([("struct", lltype.Ptr(OBJECT), "typeptr")]) effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_fields + assert not effectinfo._write_descrs_arrays def test_filter_out_array_of_void(): effects = frozenset([("array", lltype.Ptr(lltype.GcArray(lltype.Void)))]) effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_fields + assert 
not effectinfo._write_descrs_arrays def test_filter_out_struct_with_void(): effects = frozenset([("struct", lltype.Ptr(lltype.GcStruct("x", ("a", lltype.Void))), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays + assert not effectinfo._readonly_descrs_fields + assert not effectinfo._write_descrs_fields + assert not effectinfo._write_descrs_arrays class TestVirtualizableAnalyzer(object): @@ -138,3 +141,64 @@ res = self.analyze(entry, [int]) assert not res + + +def test_compute_bitstrings(): + class FDescr: + pass + class ADescr: + pass + class CDescr: + def __init__(self, ei): + self._ei = ei + def get_extra_info(self): + return self._ei + + f1descr = FDescr() + f2descr = FDescr() + f3descr = FDescr() + a1descr = ADescr() + a2descr = ADescr() + + ei1 = EffectInfo(None, None, None, None, None, None, + EffectInfo.EF_RANDOM_EFFECTS) + ei2 = EffectInfo([f1descr], [], [], [], [], []) + ei3 = EffectInfo([f1descr], [a1descr, a2descr], [], [f2descr], [], []) + + compute_bitstrings([CDescr(ei1), CDescr(ei2), CDescr(ei3), + f1descr, f2descr, f3descr, a1descr, a2descr]) + + assert f1descr.ei_index in (0, 1) + assert f2descr.ei_index == 1 - f1descr.ei_index + assert f3descr.ei_index == sys.maxint + assert a1descr.ei_index == 0 + assert a2descr.ei_index == 0 + + assert ei1.bitstring_readonly_descrs_fields is None + assert ei1.bitstring_readonly_descrs_arrays is None + assert ei1.bitstring_write_descrs_fields is None + + def expand(bitstr): + return [n for n in range(10) if bitcheck(bitstr, n)] + + assert expand(ei2.bitstring_readonly_descrs_fields) == [f1descr.ei_index] + assert expand(ei2.bitstring_write_descrs_fields) == [] + assert expand(ei2.bitstring_readonly_descrs_arrays) == [] + assert expand(ei2.bitstring_write_descrs_arrays) == [] + + assert expand(ei3.bitstring_readonly_descrs_fields) == [f1descr.ei_index] + assert 
expand(ei3.bitstring_write_descrs_fields) == [f2descr.ei_index] + assert expand(ei3.bitstring_readonly_descrs_arrays) == [0] #a1descr,a2descr + assert expand(ei3.bitstring_write_descrs_arrays) == [] + + for ei in [ei2, ei3]: + for fdescr in [f1descr, f2descr]: + assert ei.check_readonly_descr_field(fdescr) == ( + fdescr in ei._readonly_descrs_fields) + assert ei.check_write_descr_field(fdescr) == ( + fdescr in ei._write_descrs_fields) + for adescr in [a1descr, a2descr]: + assert ei.check_readonly_descr_array(adescr) == ( + adescr in ei._readonly_descrs_arrays) + assert ei.check_write_descr_array(adescr) == ( + adescr in ei._write_descrs_arrays) diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -209,7 +209,7 @@ isinstance(argboxes[3], ConstInt) and isinstance(argboxes[4], ConstInt) and isinstance(argboxes[5], ConstInt) and - len(descr.get_extra_info().write_descrs_arrays) == 1): + descr.get_extra_info().single_write_descr_array is not None): # ARRAYCOPY with constant starts and constant length doesn't escape # its argument # XXX really? 
@@ -299,9 +299,9 @@ isinstance(argboxes[3], ConstInt) and isinstance(argboxes[4], ConstInt) and isinstance(argboxes[5], ConstInt) and - len(effectinfo.write_descrs_arrays) == 1 + effectinfo.single_write_descr_array is not None ): - descr = effectinfo.write_descrs_arrays[0] + descr = effectinfo.single_write_descr_array cache = self.heap_array_cache.get(descr, None) srcstart = argboxes[3].getint() dststart = argboxes[4].getint() @@ -328,10 +328,10 @@ idx_cache._clear_cache_on_write(seen_allocation_of_target) return elif ( - len(effectinfo.write_descrs_arrays) == 1 + effectinfo.single_write_descr_array is not None ): # Fish the descr out of the effectinfo - cache = self.heap_array_cache.get(effectinfo.write_descrs_arrays[0], None) + cache = self.heap_array_cache.get(effectinfo.single_write_descr_array, None) if cache is not None: for idx, cache in cache.iteritems(): cache._clear_cache_on_write(seen_allocation_of_target) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -1,3 +1,4 @@ +import sys from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.objectmodel import we_are_translated, Symbolic @@ -87,9 +88,10 @@ class AbstractDescr(AbstractValue): - __slots__ = ('descr_index',) + __slots__ = ('descr_index', 'ei_index') llopaque = True descr_index = -1 + ei_index = sys.maxint def repr_of_descr(self): return '%r' % (self,) diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -432,28 +432,35 @@ optimize_GUARD_EXCEPTION = optimize_GUARD_NO_EXCEPTION def force_from_effectinfo(self, effectinfo): - # XXX we can get the wrong complexity here, if the lists - # XXX stored on effectinfo are large - for fielddescr in 
effectinfo.readonly_descrs_fields: - self.force_lazy_set(fielddescr) - for arraydescr in effectinfo.readonly_descrs_arrays: - self.force_lazy_setarrayitem(arraydescr) - for fielddescr in effectinfo.write_descrs_fields: - if fielddescr.is_always_pure(): - continue - try: - del self.cached_dict_reads[fielddescr] - except KeyError: - pass - self.force_lazy_set(fielddescr, can_cache=False) - for arraydescr in effectinfo.write_descrs_arrays: - self.force_lazy_setarrayitem(arraydescr, can_cache=False) - if arraydescr in self.corresponding_array_descrs: - dictdescr = self.corresponding_array_descrs.pop(arraydescr) + # Note: this version of the code handles effectively + # effectinfos that store arbitrarily many descrs, by looping + # on self.cached_{fields, arrayitems} and looking them up in + # the bitstrings stored in the effectinfo. + for fielddescr, cf in self.cached_fields.items(): + if effectinfo.check_readonly_descr_field(fielddescr): + cf.force_lazy_set(self, fielddescr) + if effectinfo.check_write_descr_field(fielddescr): + if fielddescr.is_always_pure(): + continue + try: + del self.cached_dict_reads[fielddescr] + except KeyError: + pass + cf.force_lazy_set(self, fielddescr, can_cache=False) + # + for arraydescr, submap in self.cached_arrayitems.items(): + if effectinfo.check_readonly_descr_array(arraydescr): + self.force_lazy_setarrayitem_submap(submap) + if effectinfo.check_write_descr_array(arraydescr): + self.force_lazy_setarrayitem_submap(submap, can_cache=False) + # + for arraydescr, dictdescr in self.corresponding_array_descrs.items(): + if effectinfo.check_write_descr_array(arraydescr): try: del self.cached_dict_reads[dictdescr] except KeyError: pass # someone did it already + # if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info self.force_lazy_set(vrefinfo.descr_forced) @@ -476,6 +483,10 @@ if indexb is None or indexb.contains(idx): cf.force_lazy_set(self, None, can_cache) + def 
force_lazy_setarrayitem_submap(self, submap, can_cache=True): + for cf in submap.itervalues(): + cf.force_lazy_set(self, None, can_cache) + def force_all_lazy_sets(self): items = self.cached_fields.items() if not we_are_translated(): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -620,10 +620,10 @@ and length and ((dest_info and dest_info.is_virtual()) or length.getint() <= 8) and ((source_info and source_info.is_virtual()) or length.getint() <= 8) - and len(extrainfo.write_descrs_arrays) == 1): # <-sanity check + and extrainfo.single_write_descr_array is not None): #<-sanity check source_start = source_start_box.getint() dest_start = dest_start_box.getint() - arraydescr = extrainfo.write_descrs_arrays[0] + arraydescr = extrainfo.single_write_descr_array if arraydescr.is_array_of_structs(): return False # not supported right now diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -10,7 +10,7 @@ from rpython.jit.metainterp.history import (TreeLoop, AbstractDescr, JitCellToken, TargetToken) from rpython.jit.metainterp.optimizeopt.util import sort_descrs, equaloplists -from rpython.jit.codewriter.effectinfo import EffectInfo +from rpython.jit.codewriter.effectinfo import EffectInfo, compute_bitstrings from rpython.jit.metainterp.logger import LogOperations from rpython.jit.tool.oparser import OpParser, pure_parse, convert_loop_to_trace from rpython.jit.metainterp.quasiimmut import QuasiImmutDescr @@ -530,6 +530,7 @@ metainterp_sd.virtualref_info = self.vrefinfo if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection + compute_bitstrings(self.cpu.fetch_all_descrs()) # 
compile_data.enable_opts = self.enable_opts state = optimize_trace(metainterp_sd, None, compile_data) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1838,7 +1838,11 @@ self.cpu.propagate_exception_descr = exc_descr # self.globaldata = MetaInterpGlobalData(self) + + def finish_setup_descrs(self): + from rpython.jit.codewriter import effectinfo self.all_descrs = self.cpu.setup_descrs() + effectinfo.compute_bitstrings(self.all_descrs) def _setup_once(self): """Runtime setup needed by the various components of the JIT.""" diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -132,6 +132,7 @@ metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) stats.metainterp_sd = metainterp_sd metainterp_sd.finish_setup(cw) + metainterp_sd.finish_setup_descrs() [jitdriver_sd] = metainterp_sd.jitdrivers_sd metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -27,8 +27,12 @@ def __init__(self, extraeffect, oopspecindex, write_descrs_fields, write_descrs_arrays): self.extraeffect = extraeffect self.oopspecindex = oopspecindex - self.write_descrs_fields = write_descrs_fields - self.write_descrs_arrays = write_descrs_arrays + self._write_descrs_fields = write_descrs_fields + self._write_descrs_arrays = write_descrs_arrays + if len(write_descrs_arrays) == 1: + [self.single_write_descr_array] = write_descrs_arrays + else: + self.single_write_descr_array = None def has_random_effects(self): return self.extraeffect == self.EF_RANDOM_EFFECTS @@ -37,14 +41,14 @@ def __init__(self, extraeffect, oopspecindex=None, 
write_descrs_fields=[], write_descrs_arrays=[]): self.extraeffect = extraeffect self.oopspecindex = oopspecindex - self.write_descrs_fields = write_descrs_fields - self.write_descrs_arrays = write_descrs_arrays + self.__write_descrs_fields = write_descrs_fields + self.__write_descrs_arrays = write_descrs_arrays def get_extra_info(self): return FakeEffectinfo( self.extraeffect, self.oopspecindex, - write_descrs_fields=self.write_descrs_fields, - write_descrs_arrays=self.write_descrs_arrays, + write_descrs_fields=self.__write_descrs_fields, + write_descrs_arrays=self.__write_descrs_arrays, ) arraycopydescr1 = FakeCallDescr(FakeEffectinfo.EF_CANNOT_RAISE, FakeEffectinfo.OS_ARRAYCOPY, write_descrs_arrays=[descr1]) diff --git a/rpython/jit/metainterp/test/test_warmspot.py b/rpython/jit/metainterp/test/test_warmspot.py --- a/rpython/jit/metainterp/test/test_warmspot.py +++ b/rpython/jit/metainterp/test/test_warmspot.py @@ -624,7 +624,7 @@ pass def setup_descrs(self): - pass + return [] def get_latest_descr(self, deadframe): assert isinstance(deadframe, FakeDeadFrame) diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -277,6 +277,7 @@ for vinfo in vinfos: if vinfo is not None: vinfo.finish() + self.metainterp_sd.finish_setup_descrs() if self.cpu.translate_support_code: self.annhelper.finish() diff --git a/rpython/tool/algo/bitstring.py b/rpython/tool/algo/bitstring.py new file mode 100644 --- /dev/null +++ b/rpython/tool/algo/bitstring.py @@ -0,0 +1,23 @@ + + +def make_bitstring(lst): + "NOT_RPYTHON" + if not lst: + return '' + num_bits = max(lst) + 1 + num_bytes = (num_bits + 7) // 8 + entries = [0] * num_bytes + for x in lst: + assert x >= 0 + entries[x >> 3] |= 1 << (x & 7) + return ''.join(map(chr, entries)) + +def bitcheck(bitstring, n): + assert n >= 0 + byte_number = n >> 3 + if byte_number >= len(bitstring): + return False + return 
(ord(bitstring[byte_number]) & (1 << (n & 7))) != 0 + +def num_bits(bitstring): + return len(bitstring) << 3 diff --git a/rpython/tool/algo/test/test_bitstring.py b/rpython/tool/algo/test/test_bitstring.py new file mode 100644 --- /dev/null +++ b/rpython/tool/algo/test/test_bitstring.py @@ -0,0 +1,25 @@ +from rpython.tool.algo.bitstring import * +from hypothesis import given, strategies + +def test_make(): + assert make_bitstring([]) == '' + assert make_bitstring([0]) == '\x01' + assert make_bitstring([7]) == '\x80' + assert make_bitstring([8]) == '\x00\x01' + assert make_bitstring([2, 4, 20]) == '\x14\x00\x10' + +def test_bitcheck(): + assert bitcheck('\x01', 0) is True + assert bitcheck('\x01', 1) is False + assert bitcheck('\x01', 10) is False + assert [n for n in range(32) if bitcheck('\x14\x00\x10', n)] == [2, 4, 20] + + at given(strategies.lists(strategies.integers(min_value=0, max_value=299))) +def test_random(lst): + bitstring = make_bitstring(lst) + assert set([n for n in range(300) if bitcheck(bitstring, n)]) == set(lst) + +def test_num_bits(): + assert num_bits('') == 0 + assert num_bits('a') == 8 + assert num_bits('bcd') == 24 diff --git a/rpython/translator/backendopt/test/test_writeanalyze.py b/rpython/translator/backendopt/test/test_writeanalyze.py --- a/rpython/translator/backendopt/test/test_writeanalyze.py +++ b/rpython/translator/backendopt/test/test_writeanalyze.py @@ -1,3 +1,4 @@ +import py from rpython.rtyper.lltypesystem import lltype from rpython.translator.translator import TranslationContext, graphof from rpython.translator.backendopt.writeanalyze import WriteAnalyzer, top_set @@ -314,6 +315,7 @@ assert T1 == T2 def test_cutoff(self): + py.test.skip("cutoff: disabled") from rpython.rlib.unroll import unrolling_iterable cutoff = 20 attrs = unrolling_iterable(["s%s" % i for i in range(cutoff + 5)]) diff --git a/rpython/translator/backendopt/writeanalyze.py b/rpython/translator/backendopt/writeanalyze.py --- 
a/rpython/translator/backendopt/writeanalyze.py +++ b/rpython/translator/backendopt/writeanalyze.py @@ -4,10 +4,14 @@ top_set = object() empty_set = frozenset() -CUTOFF = 3000 +# CUTOFF is disabled, as it gave a strangely not-working-any-more effect +# if the size of the result grows past that bound. The main user was +# optimizeopt/heap.py (force_from_effectinfo), which has been rewritten +# to be happy with any size now. +#CUTOFF = 3000 class WriteAnalyzer(graphanalyze.GraphAnalyzer): - cutoff = CUTOFF + #cutoff = CUTOFF def bottom_result(self): return empty_set @@ -25,8 +29,8 @@ if other is top_set: return top_set result.update(other) - if len(result) > self.cutoff: - return top_set + #if len(result) > self.cutoff: + # return top_set return result def finalize_builder(self, result): From pypy.commits at gmail.com Wed Apr 27 04:56:23 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 01:56:23 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: fix or skip runappdirect variations of tests Message-ID: <57207eb7.6614c20a.243bf.ffffedaf@mx.google.com> Author: mattip Branch: cpyext-for-merge Changeset: r83965:cfc882e1b8c2 Date: 2016-04-27 09:18 +0300 http://bitbucket.org/pypy/pypy/changeset/cfc882e1b8c2/ Log: fix or skip runappdirect variations of tests diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -220,11 +220,6 @@ assert isinstance(w_type, W_TypeObject) return get_typedescr(w_type.layout.typedef).realize(space, ref) - -def debug_collect(): - rawrefcount._collect() - - def as_pyobj(space, w_obj): """ Returns a 'PyObject *' representing the given intepreter object. 
diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -210,7 +210,7 @@ 0, /*tp_getattro*/ (setattrofunc)foo_setattro, /*tp_setattro*/ 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT, /*tp_flags*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ foo_doc, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -2,22 +2,21 @@ import weakref import os -import py +import py, pytest from pypy.conftest import pypydir -from pypy.interpreter.error import OperationError from pypy.interpreter import gateway -from rpython.rtyper.lltypesystem import rffi, lltype, ll2ctypes +from rpython.rtyper.lltypesystem import lltype, ll2ctypes from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator import platform from rpython.translator.gensupp import uniquemodulename from rpython.tool.udir import udir from pypy.module.cpyext import api from pypy.module.cpyext.state import State -from pypy.module.cpyext.pyobject import debug_collect -from pypy.module.cpyext.pyobject import Py_DecRef, InvalidPointerException +from pypy.module.cpyext.pyobject import Py_DecRef from rpython.tool.identity_dict import identity_dict from rpython.tool import leakfinder +from rpython.rlib import rawrefcount def setup_module(module): if os.name == 'nt': @@ -164,7 +163,7 @@ state.reset_borrowed_references() def check_and_print_leaks(self): - debug_collect() + rawrefcount._collect() # check for sane refcnts import gc @@ -218,7 +217,10 @@ class AppTestApi(LeakCheckingTest): def setup_class(cls): from rpython.rlib.clibffi import get_libc_name - cls.w_libc = cls.space.wrap(get_libc_name()) + if cls.runappdirect: + cls.libc = get_libc_name() + else: + cls.w_libc = cls.space.wrap(get_libc_name()) def setup_method(self, 
meth): freeze_refcnts(self) @@ -233,9 +235,11 @@ "the test actually passed in the first place; if it failed " "it is likely to reach this place.") + @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') def test_only_import(self): import cpyext + @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') def test_load_error(self): import cpyext raises(ImportError, cpyext.load_module, "missing.file", "foo") @@ -347,7 +351,11 @@ @gateway.unwrap_spec(mod=str, name=str) def reimport_module(space, mod, name): - api.load_extension_module(space, mod, name) + if self.runappdirect: + import imp + return imp.load_dynamic(name, mod) + else: + api.load_extension_module(space, mod, name) return space.getitem( space.sys.get('modules'), space.wrap(name)) @@ -394,6 +402,9 @@ """ self.imported_module_names.append(name) + def debug_collect(space): + rawrefcount._collect() + # A list of modules which the test caused to be imported (in # self.space). These will be cleaned up automatically in teardown. self.imported_module_names = [] @@ -585,6 +596,8 @@ If `cherry.date` is an extension module which imports `apple.banana`, the latter is added to `sys.modules` for the `"apple.banana"` key. """ + if self.runappdirect: + skip('record_imported_module not supported in runappdirect mode') # Build the extensions. 
banana = self.compile_module( "apple.banana", separate_module_files=[self.here + 'banana.c']) @@ -778,7 +791,8 @@ def test_internal_exceptions(self): - import sys + if self.runappdirect: + skip('cannot import module with undefined functions') init = """ if (Py_IsInitialized()) Py_InitModule("foo", methods); @@ -846,6 +860,7 @@ ]) raises(SystemError, mod.newexc, "name", Exception, {}) + @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy specific test') def test_hash_pointer(self): mod = self.import_extension('foo', [ ('get_hash', 'METH_NOARGS', @@ -896,6 +911,7 @@ print p assert 'py' in p + @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') def test_get_version(self): mod = self.import_extension('foo', [ ('get_version', 'METH_NOARGS', diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -215,7 +215,11 @@ class AppTestObject(AppTestCpythonExtensionBase): def setup_class(cls): AppTestCpythonExtensionBase.setup_class.im_func(cls) - cls.w_tmpname = cls.space.wrap(str(py.test.ensuretemp("out", dir=0))) + tmpname = str(py.test.ensuretemp('out', dir=0)) + if cls.runappdirect: + cls.tmpname = tmpname + else: + cls.w_tmpname = cls.space.wrap(tmpname) def test_object_malloc(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Wed Apr 27 04:56:25 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 01:56:25 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: skip runappdirect tests for merge Message-ID: <57207eb9.c42e1c0a.21369.ffffdf2d@mx.google.com> Author: mattip Branch: cpyext-for-merge Changeset: r83966:2e7438e4e79f Date: 2016-04-27 09:19 +0300 http://bitbucket.org/pypy/pypy/changeset/2e7438e4e79f/ Log: skip runappdirect tests for merge diff --git a/pypy/module/cpyext/test/test_frameobject.py 
b/pypy/module/cpyext/test/test_frameobject.py --- a/pypy/module/cpyext/test/test_frameobject.py +++ b/pypy/module/cpyext/test/test_frameobject.py @@ -57,15 +57,17 @@ Py_XDECREF(py_frame); return NULL; """), - ]) + ], prologue='#include "frameobject.h"') exc = raises(ValueError, module.raise_exception) - frame = exc.traceback.tb_frame - assert frame.f_code.co_filename == "filename" - assert frame.f_code.co_name == "funcname" + exc.value[0] == 'error message' + if not self.runappdirect: + frame = exc.traceback.tb_frame + assert frame.f_code.co_filename == "filename" + assert frame.f_code.co_name == "funcname" - # Cython does not work on CPython as well... - assert exc.traceback.tb_lineno == 42 # should be 48 - assert frame.f_lineno == 42 + # Cython does not work on CPython as well... + assert exc.traceback.tb_lineno == 42 # should be 48 + assert frame.f_lineno == 42 def test_traceback_check(self): module = self.import_extension('foo', [ diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -1,10 +1,10 @@ -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import rffi from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref +from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.module.cpyext.typeobject import PyTypeObjectPtr -import py +import pytest import sys class AppTestTypeObject(AppTestCpythonExtensionBase): @@ -123,7 +123,9 @@ obj = module.fooType.classmeth() assert obj is module.fooType + @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='cpython segfaults') def test_new(self): + # XXX cpython segfaults but if run singly (with -k test_new) this passes module = self.import_module(name='foo') obj = 
module.new() # call __new__ @@ -176,6 +178,8 @@ x = module.MetaType('name', (), {}) assert isinstance(x, type) assert isinstance(x, module.MetaType) + if self.runappdirect and '__pypy__' in sys.builtin_module_names: + skip('x is not callable when runappdirect??') x() def test_metaclass_compatible(self): @@ -185,6 +189,8 @@ assert type(module.fooType).__mro__ == (type, object) y = module.MetaType('other', (module.MetaType,), {}) assert isinstance(y, module.MetaType) + if self.runappdirect and '__pypy__' in sys.builtin_module_names: + skip('y is not callable when runappdirect??') x = y('something', (type(y),), {}) del x, y @@ -323,7 +329,7 @@ return NULL; Py_DECREF(a1); PyType_Modified(type); - value = PyObject_GetAttrString(type, "a"); + value = PyObject_GetAttrString((PyObject*)type, "a"); Py_DECREF(value); if (PyDict_SetItemString(type->tp_dict, "a", @@ -331,7 +337,7 @@ return NULL; Py_DECREF(a2); PyType_Modified(type); - value = PyObject_GetAttrString(type, "a"); + value = PyObject_GetAttrString((PyObject*)type, "a"); return value; ''' ) @@ -885,7 +891,9 @@ #print('calling module.footype()...') module.footype("X", (object,), {}) + @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='cpython fails') def test_app_subclass_of_c_type(self): + # on cpython, the size changes (6 bytes added) module = self.import_module(name='foo') size = module.size_of_instances(module.fooType) class f1(object): From pypy.commits at gmail.com Wed Apr 27 04:56:29 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 01:56:29 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: document branch Message-ID: <57207ebd.47afc20a.9c2cb.ffffef81@mx.google.com> Author: Matti Picus Branch: cpyext-for-merge Changeset: r83968:2683affc5a62 Date: 2016-04-27 11:55 +0300 http://bitbucket.org/pypy/pypy/changeset/2683affc5a62/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ 
b/pypy/doc/whatsnew-head.rst @@ -22,3 +22,24 @@ JIT: use bitstrings to compress the lists of read or written descrs that we attach to EffectInfo. Fixes a problem we had in remove-objspace-options. + +.. branch: cpyext-for-merge +Update cpyext C-API support: + - allow c-snippet tests to be run with -A so we can verify we are compatible + - fix many edge cases exposed by fixing tests to run with -A + - issequence() logic matches cpython + - make PyStringObject and PyUnicodeObject field names compatible with cpython + - add prelminary support for PyDateTime_* + - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy, + PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile, + - PyAnySet_CheckExact, PyUnicode_Concat + - improve support for PyGILState_Ensure, PyGILState_Release, and thread + primitives, also find a case where CPython will allow thread creation + before PyEval_InitThreads is run, dissallow on PyPy + - create a PyObject-specific list strategy + - rewrite slot assignment for typeobjects + - improve tracking of PyObject to rpython object mapping + - support tp_as_{number, sequence, mapping, buffer} slots +After this branch, we are almost able to support upstream numpy via cpyext, so +we created (yet another) fork of numpy at github.com/pypy/numpy with the needed +changes From pypy.commits at gmail.com Wed Apr 27 05:15:53 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 27 Apr 2016 02:15:53 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: added jitlog_disable to the _vmprof module Message-ID: <57208349.4ea81c0a.8139a.5993@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83969:5e6017560e6b Date: 2016-04-27 11:15 +0200 http://bitbucket.org/pypy/pypy/changeset/5e6017560e6b/ Log: added jitlog_disable to the _vmprof module diff --git a/pypy/module/_vmprof/__init__.py b/pypy/module/_vmprof/__init__.py --- a/pypy/module/_vmprof/__init__.py +++ b/pypy/module/_vmprof/__init__.py @@ -12,6 +12,7 @@ 'enable': 
'interp_vmprof.enable', 'enable_jitlog': 'interp_vmprof.enable_jitlog', 'disable': 'interp_vmprof.disable', + 'disable_jitlog': 'interp_vmprof.disable_jitlog', 'write_all_code_objects': 'interp_vmprof.write_all_code_objects', 'VMProfError': 'space.fromcache(interp_vmprof.Cache).w_VMProfError', } diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -77,6 +77,10 @@ except rvmprof.VMProfError, e: raise VMProfError(space, e) +def disable_jitlog(space): + """ Disable PyPy's logging facility. """ + rvmprof.disable_jitlog() + def write_all_code_objects(space): """ Needed on cpython, just empty function here """ diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -131,6 +131,9 @@ blob = jl.assemble_header() self.cintf.jitlog_write_marked(jl.MARK_JITLOG_HEADER, blob, len(blob)) + def disable_jitlog(self): + self.cintf.jitlog_teardown() + def disable(self): """Disable vmprof. Raises VMProfError if something goes wrong. 
From pypy.commits at gmail.com Wed Apr 27 05:47:17 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 02:47:17 -0700 (PDT) Subject: [pypy-commit] pypy default: reference rawrefcount, remove dead link Message-ID: <57208aa5.2413c30a.a9bf7.05f6@mx.google.com> Author: Matti Picus Branch: Changeset: r83970:5826363e5569 Date: 2016-04-27 12:46 +0300 http://bitbucket.org/pypy/pypy/changeset/5826363e5569/ Log: reference rawrefcount, remove dead link diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -21,7 +21,7 @@ :source:`pypy/doc/discussion/` drafts of ideas and documentation -:source:`pypy/goal/` our :ref:`main PyPy-translation scripts ` +:source:`pypy/goal/` our main PyPy-translation scripts live here :source:`pypy/interpreter/` :doc:`bytecode interpreter ` and related objects diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -13,3 +13,4 @@ discussion/improve-rpython discussion/ctypes-implementation discussion/jit-profiler + discussion/rawrefcount diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -106,8 +106,12 @@ For information on which third party extensions work (or do not work) with PyPy see the `compatibility wiki`_. +For more information about how we manage refcounting semamtics see +rawrefcount_ + .. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home .. _cffi: http://cffi.readthedocs.org/ +.. _rawrefcount: discussion/rawrefcount.html On which platforms does PyPy run? 
From pypy.commits at gmail.com Wed Apr 27 08:45:04 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 27 Apr 2016 05:45:04 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: stitching was not written to the log, but it is now Message-ID: <5720b450.c4efc20a.a49d6.546d@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83971:33c6e3af459c Date: 2016-04-27 14:43 +0200 http://bitbucket.org/pypy/pypy/changeset/33c6e3af459c/ Log: stitching was not written to the log, but it is now diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -612,6 +612,8 @@ if logger: log = logger.log_trace(MARK_TRACE_ASM, None, self.mc) log.write(inputargs, operations, ops_offset) + # log that the already written bridge is stitched to a descr! + logger.log_patch_guard(descr_number, rawstart) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) self.teardown() From pypy.commits at gmail.com Wed Apr 27 08:49:34 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 05:49:34 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: revert 8d781f7a74f7, fix issue #2282 Message-ID: <5720b55e.22c8c20a.e6a6c.598f@mx.google.com> Author: mattip Branch: cpyext-for-merge Changeset: r83972:79a67a628ed8 Date: 2016-04-25 08:20 +0300 http://bitbucket.org/pypy/pypy/changeset/79a67a628ed8/ Log: revert 8d781f7a74f7, fix issue #2282 diff too long, truncating to 2000 out of 2280 lines diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -150,7 +150,7 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir): +def copy_header_files(dstdir, copy_numpy_headers): # XXX: 20 lines of code to recursively copy a directory, really?? 
assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') @@ -158,6 +158,18 @@ headers.append(udir.join(name)) _copy_header_files(headers, dstdir) + if copy_numpy_headers: + try: + dstdir.mkdir('numpy') + except py.error.EEXIST: + pass + numpy_dstdir = dstdir / 'numpy' + + numpy_include_dir = include_dir / 'numpy' + numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') + _copy_header_files(numpy_headers, numpy_dstdir) + + class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -1345,7 +1357,7 @@ setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include) + copy_header_files(trunk_include, use_micronumpy) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h @@ -0,0 +1,10 @@ + + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +#define import_array() +#define PyArray_New _PyArray_New + diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -1,6 +1,8 @@ -/* NDArray object interface - S. H. Muller, 2013/07/26 */ -/* For testing ndarrayobject only */ +/* NDArray object interface - S. H. 
Muller, 2013/07/26 + * It will be copied by numpy/core/setup.py by install_data to + * site-packages/numpy/core/includes/numpy +*/ #ifndef Py_NDARRAYOBJECT_H #define Py_NDARRAYOBJECT_H @@ -8,8 +10,13 @@ extern "C" { #endif +#include "old_defines.h" #include "npy_common.h" -#include "ndarraytypes.h" +#include "__multiarray_api.h" + +#define NPY_UNUSED(x) x +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -17,18 +24,207 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define NPY_MAXDIMS 32 -/* functions defined in ndarrayobject.c*/ +#ifndef NDARRAYTYPES_H +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +/* data types copied from numpy/ndarraytypes.h + * keep numbers in sync with micronumpy.interp_dtype.DTypeCache + */ +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. 
+ */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) +#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) +#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) +#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) + + +/* flags */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 +#define NPY_ARRAY_OWNDATA 0x0004 +#define NPY_ARRAY_FORCECAST 0x0010 +#define NPY_ARRAY_ENSURECOPY 0x0020 +#define NPY_ARRAY_ENSUREARRAY 0x0040 +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 +#define NPY_ARRAY_ALIGNED 0x0100 +#define NPY_ARRAY_NOTSWAPPED 0x0200 +#define NPY_ARRAY_WRITEABLE 0x0400 +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY 
(NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +#define NPY_FARRAY NPY_ARRAY_FARRAY +#define NPY_CARRAY NPY_ARRAY_CARRAY + +#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + +#define PyArray_ISONESEGMENT(arr) (1) +#define PyArray_ISNOTSWAPPED(arr) (1) +#define PyArray_ISBYTESWAPPED(arr) (0) + +#endif + +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX32 NPY_CFLOAT +#define NPY_COMPLEX64 NPY_CDOUBLE + + +/* functions */ +#ifndef PyArray_NDIM + +#define PyArray_Check _PyArray_Check +#define PyArray_CheckExact 
_PyArray_CheckExact +#define PyArray_FLAGS _PyArray_FLAGS + +#define PyArray_NDIM _PyArray_NDIM +#define PyArray_DIM _PyArray_DIM +#define PyArray_STRIDE _PyArray_STRIDE +#define PyArray_SIZE _PyArray_SIZE +#define PyArray_ITEMSIZE _PyArray_ITEMSIZE +#define PyArray_NBYTES _PyArray_NBYTES +#define PyArray_TYPE _PyArray_TYPE +#define PyArray_DATA _PyArray_DATA + +#define PyArray_Size PyArray_SIZE +#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) + +#define PyArray_FromAny _PyArray_FromAny +#define PyArray_FromObject _PyArray_FromObject +#define PyArray_ContiguousFromObject PyArray_FromObject +#define PyArray_ContiguousFromAny PyArray_FromObject + +#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) +#define PyArray_FROM_OTF(obj, typenum, requirements) \ + PyArray_FromObject(obj, typenum, 0, 0) + +#define PyArray_New _PyArray_New +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData +#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning + +#define PyArray_EMPTY(nd, dims, type_num, fortran) \ + PyArray_SimpleNew(nd, dims, type_num) PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_CopyInto _PyArray_CopyInto +#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) + +/* Don't use these in loops! 
*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0))) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1))) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1) + \ + (k)*PyArray_STRIDE(obj,2))) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1) + \ + (k)*PyArray_STRIDE(obj,2) + \ + (l)*PyArray_STRIDE(obj,3))) + +#endif #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h --- a/pypy/module/cpyext/include/numpy/ndarraytypes.h +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -1,9 +1,69 @@ #ifndef NDARRAYTYPES_H #define NDARRAYTYPES_H -/* For testing ndarrayobject only */ +#include "numpy/npy_common.h" +//#include "npy_endian.h" +//#include "npy_cpu.h" +//#include "utils.h" -#include "numpy/npy_common.h" +//for pypy - numpy has lots of typedefs +//for pypy - make life easier, less backward support +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION +#undef NPY_1_8_API_VERSION + +#define NPY_ENABLE_SEPARATE_COMPILATION 1 +#define NPY_VISIBILITY_HIDDEN + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN +#else + #define NPY_NO_EXPORT static +#endif + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + + + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. 
+ * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version. This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. 
+ */ +#define NPY_FEATURE_VERSION NPY_API_VERSION enum NPY_TYPES { NPY_BOOL=0, NPY_BYTE, NPY_UBYTE, @@ -31,6 +91,18 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + /* * These characters correspond to the array type and the struct * module @@ -85,6 +157,27 @@ }; typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2 +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_MERGESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0, +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { NPY_NOSCALAR=-1, NPY_BOOL_SCALAR, NPY_INTPOS_SCALAR, @@ -93,6 +186,7 @@ NPY_COMPLEX_SCALAR, NPY_OBJECT_SCALAR } NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) /* For specifying array memory layout or iteration order */ typedef enum { @@ -106,6 +200,729 @@ NPY_KEEPORDER=2 } NPY_ORDER; +/* For specifying allowed casting in operations which support it */ +typedef enum { + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, + + /* + * Temporary internal definition only, will be removed in upcoming + * release, see below + * */ + NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length 
of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) + +typedef enum { + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10,/* nanoseconds */ + NPY_FR_ps = 11,/* picoseconds */ + NPY_FR_fs = 12,/* femtoseconds */ + NPY_FR_as = 13,/* attoseconds */ + NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. */ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. 
*/ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. + ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + + /* + * Macros to define how array, and dimension/strides data is + * allocated. 
+ */ + + /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ + +#define NPY_USE_PYMEM 1 + +#if NPY_USE_PYMEM == 1 +#define PyArray_malloc PyMem_Malloc +#define PyArray_free PyMem_Free +#define PyArray_realloc PyMem_Realloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. 
Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. 
*/ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). 
+ */ + char byteorder; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* element size (itemsize) for this type */ + int elsize; + /* alignment needed for this type */ + int alignment; + /* + * Non-NULL if this type is + * is an array (C-contiguous) + * of some other type + */ + struct _arr_descr *subarray; + /* + * The fields dictionary for this type + * For statically defined descr this + * is always Py_None + */ + PyObject *fields; + /* + * An ordered tuple of field names or NULL + * if no fields are defined + */ + PyObject *names; + /* + * a table of functions specific for each + * basic data descriptor + */ + PyArray_ArrFuncs *f; + /* Metadata about this dtype */ + PyObject *metadata; + /* + * Metadata specific to the C implementation + * of the particular dtype. This was added + * for NumPy 1.7.0. + */ + NpyAuxData *c_metadata; +} PyArray_Descr; + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. 
Except in the + * case of UPDATEIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that shold be + * decref'd on deletion + * + * For UPDATEIFCOPY flag this is an + * array to-be-updated upon deletion + * of this one + */ + PyObject *base; + /* Pointer to type structure */ + PyArray_Descr *descr; + /* Flags describing array -- see below */ + int flags; + /* For weak references */ + PyObject *weakreflist; +} PyArrayObject_fields; + +/* + * To hide the implementation details, we only expose + * the Python struct HEAD. + */ +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +/* + * Can't put this in npy_deprecated_api.h like the others. + * PyArrayObject field access is deprecated as of NumPy 1.7. + */ +typedef PyArrayObject_fields PyArrayObject; +#else +typedef struct tagPyArrayObject { + PyObject_HEAD +} PyArrayObject; +#endif + +#define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) + +/* Array Flags Object */ +typedef struct PyArrayFlagsObject { + PyObject_HEAD + PyObject *arr; + int flags; +} PyArrayFlagsObject; + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + +typedef struct { + NPY_DATETIMEUNIT base; + int num; +} PyArray_DatetimeMetaData; + +typedef struct { + NpyAuxData base; + PyArray_DatetimeMetaData meta; +} PyArray_DatetimeDTypeMetaData; + +/* + * This structure contains an exploded view of a date-time value. + * NaT is represented by year == NPY_DATETIME_NAT. + */ +typedef struct { + npy_int64 year; + npy_int32 month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +/* This is not used internally. 
*/ +typedef struct { + npy_int64 day; + npy_int32 sec, us, ps, as; +} npy_timedeltastruct; + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 + +/* + * Set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 + +/* + * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a + * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with + * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS + * at the same time if they have either zero or one element. + * If NPY_RELAXED_STRIDES_CHECKING is set, a higher dimensional + * array is always C_CONTIGUOUS and F_CONTIGUOUS if it has zero elements + * and the array is contiguous if ndarray.squeeze() is contiguous. + * I.e. dimensions for which `ndarray.shape[dimension] == 1` are + * ignored. + */ + +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + * + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_OWNDATA 0x0004 + +/* + * An array never has the next four set; they're only used as parameter + * flags to the the various FromAny functions + * + * This flag may be requested in constructor functions. + */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_ARRAY_FORCECAST 0x0010 + +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. + * + * This flag may be requested in constructor functions. 
+ */ +#define NPY_ARRAY_ENSURECOPY 0x0020 + +/* + * Make sure the returned array is a base-class ndarray + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSUREARRAY 0x0040 + +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 + +/* + * Array data is aligned on the appropiate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_ALIGNED 0x0100 + +/* + * Array data has the native endianness + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_NOTSWAPPED 0x0200 + +/* + * Array data is writeable + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEABLE 0x0400 + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when this array is deallocated + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 + +/* + * NOTE: there are also internal flags defined in multiarray/arrayobject.h, + * which start at bit 31 and work down. 
+ */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ + ((p).real < (q).real))) +#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \ + ((p).real > (q).real))) +#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ + ((p).real <= (q).real))) +#define PyArray_CGE(p,q) ((((p).real==(q).real) ? 
((p).imag >= (q).imag) : \ + ((p).real >= (q).real))) +#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) +#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) /* * C API: consists of Macros and functions. The MACROS are defined @@ -120,4 +937,850 @@ #define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) #define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \ + { _save = PyEval_SaveThread();} } while (0); + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + +/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* 
Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 + +/*** Per-operand flags that may be passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define 
NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp 
limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define _PyArray_ITER_NEXT3(it) do { \ + if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ + (it)->coordinates[2]++; \ + (it)->dataptr += (it)->strides[2]; \ + } \ + else { \ + (it)->coordinates[2] = 0; \ + (it)->dataptr -= (it)->backstrides[2]; \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] \ + (it)->backstrides[1]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + 
_PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) + +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) + +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp) (ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + * Any object passed to PyArray_Broadcast must be binary compatible + * with this structure. 
+ */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + +/* Store the information needed for fancy-indexing over an array */ + +typedef struct { + PyObject_HEAD + /* + * Multi-iterator portion --- needs to be present in this + * order to work with PyArray_Broadcast + */ + + int numiter; /* number of index-array + iterators */ + npy_intp size; /* size of 
broadcasted + result */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object + iterators */ + PyArrayIterObject *ait; /* flat Iterator for + underlying array */ + + /* flat iterator for subspace (when numiter < nd) */ + PyArrayIterObject *subspace; + + /* + * if subspace iteration, then this is the array of axes in + * the underlying array represented by the index objects + */ + int iteraxes[NPY_MAXDIMS]; + /* + * if subspace iteration, the these are the coordinates to the + * start of the subspace. + */ + npy_intp bscoord[NPY_MAXDIMS]; + + PyObject *indexobj; /* creating obj */ + /* + * consec is first used to indicate wether fancy indices are + * consecutive and then denotes at which axis they are inserted + */ + int consec; + char *dataptr; + +} PyArrayMapIterObject; + +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp dimensions[NPY_MAXDIMS]; + + /* + * Neighborhood points coordinates are 
computed relatively to the + * point pointed by _internal_iter + */ + PyArrayIterObject* _internal_iter; + /* + * To keep a reference to the representation of the constant value + * for constant padding + */ + char* constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static NPY_INLINE int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +#if 0 +static NPY_INLINE int +PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); +#endif + +/* + * Include inline implementations - functions defined there are not + * considered public API + */ +#define _NPY_INCLUDE_NEIGHBORHOOD_IMP +//#include "_neighborhood_iterator_imp.h" +#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP + +/* The default array type */ +#define NPY_DEFAULT_TYPE NPY_DOUBLE + +/* + * All sorts of useful ways to look into a PyArrayObject. It is recommended + * to use PyArrayObject * objects instead of always casting from PyObject *, + * for improved type checking. + * + * In many cases here the macro versions of the accessors are deprecated, + * but can't be immediately changed to inline functions because the + * preexisting macros accept PyObject * and do automatic casts. Inline + * functions accepting PyArrayObject * provides for some compile-time + * checking of correctness when working with these objects in C. + */ + +#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ + (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\ + NPY_ARRAY_F_CONTIGUOUS : 0)) + +#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API)) +/* + * Changing access macros into functions, to allow for future hiding + * of the internal memory layout. This later hiding will allow the 2.x series + * to change the internal representation of arrays without affecting + * ABI compatibility. + */ + +static NPY_INLINE int +PyArray_NDIM(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->nd; +} + +static NPY_INLINE void * +PyArray_DATA(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE char * +PyArray_BYTES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static NPY_INLINE npy_intp * +PyArray_DIMS(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +static NPY_INLINE npy_intp * +PyArray_STRIDES(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->strides; +} + +static NPY_INLINE npy_intp +PyArray_DIM(const PyArrayObject *arr, int idim) +{ + return ((PyArrayObject_fields *)arr)->dimensions[idim]; +} + +static NPY_INLINE npy_intp +PyArray_STRIDE(const PyArrayObject *arr, int istride) +{ + return ((PyArrayObject_fields *)arr)->strides[istride]; +} + +static NPY_INLINE PyObject * +PyArray_BASE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->base; +} + +static NPY_INLINE PyArray_Descr * +PyArray_DESCR(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE int +PyArray_FLAGS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->flags; +} + +static NPY_INLINE npy_intp +PyArray_ITEMSIZE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->elsize; +} + +static NPY_INLINE int +PyArray_TYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->type_num; +} + +static NPY_INLINE int +PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) +{ + return (PyArray_FLAGS(arr) & 
flags) == flags; +} + +static NPY_INLINE PyObject * +PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) +{ + return ((PyArrayObject_fields *)arr)->descr->f->getitem( + (void *)itemptr, (PyArrayObject *)arr); +} + +static NPY_INLINE int +PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) +{ + return ((PyArrayObject_fields *)arr)->descr->f->setitem( + v, itemptr, arr); +} + +#else + +/* These macros are deprecated as of NumPy 1.7. */ +#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd) +#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data) +#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data) +#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions) +#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides) +#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) +#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) +#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base) +#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr) +#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags) +#define PyArray_CHKFLAGS(m, FLAGS) \ + ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS)) +#define PyArray_ITEMSIZE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->elsize) +#define PyArray_TYPE(obj) \ + (((PyArrayObject_fields *)(obj))->descr->type_num) +#define PyArray_GETITEM(obj,itemptr) \ + PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \ + (PyArrayObject *)(obj)) + +#define PyArray_SETITEM(obj,itemptr,v) \ + PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \ + (char *)(itemptr), \ + (PyArrayObject *)(obj)) +#endif + +static NPY_INLINE PyArray_Descr * +PyArray_DTYPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static NPY_INLINE npy_intp * +PyArray_SHAPE(PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +/* + * Enables the specified array flags. 
Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags |= flags; +} + +/* + * Clears the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static NPY_INLINE void +PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags &= ~flags; +} + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ + ((type) == NPY_DOUBLE) || \ + ((type) == NPY_CDOUBLE) || \ + ((type) == NPY_BOOL) || \ + ((type) == NPY_OBJECT )) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define 
PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj)) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) +#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) +#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) +#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) +#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) +#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) +#define 
PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) +#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) +#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) +#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) +#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) +#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) + + /* + * FIXME: This should check for a flag on the data-type that + * states whether or not it is variable length. Because the + * ISFLEXIBLE check is hard-coded to the built-in data-types. + */ +#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) + +#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) + + +#define NPY_LITTLE '<' +#define NPY_BIG '>' +#define NPY_NATIVE '=' +#define NPY_SWAP 's' +#define NPY_IGNORE '|' + +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN +#define NPY_NATBYTE NPY_BIG +#define NPY_OPPBYTE NPY_LITTLE +#else +#define NPY_NATBYTE NPY_LITTLE +#define NPY_OPPBYTE NPY_BIG +#endif + +#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) +#define PyArray_IsNativeByteOrder PyArray_ISNBO +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + + +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) +#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) 
+ +/************************************************************ + * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. + ************************************************************/ + +typedef struct { + npy_intp perm, stride; +} npy_stride_sort_item; + +/************************************************************ + * This is the form of the struct that's returned pointed by the + * PyCObject attribute of an array __array_struct__. See + * http://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full + * documentation. + ************************************************************/ +typedef struct { + int two; /* + * contains the integer 2 as a sanity + * check + */ + + int nd; /* number of dimensions */ + + char typekind; /* + * kind in array --- character code of + * typestr + */ + + int itemsize; /* size of each element */ + + int flags; /* + * how should be data interpreted. Valid + * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), + * ALIGNED (0x100), NOTSWAPPED (0x200), and + * WRITEABLE (0x400). ARR_HAS_DESCR (0x800) + * states that arrdescr field is present in + * structure + */ + + npy_intp *shape; /* + * A length-nd array of shape + * information + */ + + npy_intp *strides; /* A length-nd array of stride information */ + + void *data; /* A pointer to the first element of the array */ + + PyObject *descr; /* + * A list of fields or NULL (ignored if flags + * does not have ARR_HAS_DESCR flag set) + */ +} PyArrayInterface; + +/* + * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions. + * See the documentation for PyDataMem_SetEventHook. + */ +typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size, + void *user_data); + +/* + * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files + * npy_*_*_deprecated_api.h are only included from here and nowhere else. + */ +#ifdef NPY_DEPRECATED_INCLUDES +#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." 
+#endif +#define NPY_DEPRECATED_INCLUDES +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +#include "npy_1_7_deprecated_api.h" +#endif +/* + * There is no file npy_1_8_deprecated_api.h since there are no additional + * deprecated API features in NumPy 1.8. + * + * Note to maintainers: insert code like the following in future NumPy + * versions. + * + * #if !defined(NPY_NO_DEPRECATED_API) || \ From pypy.commits at gmail.com Wed Apr 27 08:49:41 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 05:49:41 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: done in 5c74afbd7d74 Message-ID: <5720b565.26b0c20a.cb528.5bcf@mx.google.com> Author: Matti Picus Branch: cpyext-for-merge Changeset: r83976:5421b73a9a3b Date: 2016-04-27 15:33 +0300 http://bitbucket.org/pypy/pypy/changeset/5421b73a9a3b/ Log: done in 5c74afbd7d74 diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -1,6 +1,3 @@ * python setup.py install in numpy does not somehow tell setuptools it's installed (I bet it's about the py27 tag) * reduce size of generated c code from slot definitions in slotdefs. 
-* fix py_string_as_string_unicode-getstringandsize_unicode which - segfaults when run -A after printing '.', the same test passes cpython -A - and untranslated From pypy.commits at gmail.com Wed Apr 27 08:49:35 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 05:49:35 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: fix test (revert part of a03329def3ec) Message-ID: <5720b55f.47afc20a.9c2cb.5b1d@mx.google.com> Author: mattip Branch: cpyext-for-merge Changeset: r83973:34e2b29d5a14 Date: 2016-04-25 08:45 +0300 http://bitbucket.org/pypy/pypy/changeset/34e2b29d5a14/ Log: fix test (revert part of a03329def3ec) diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -102,7 +102,7 @@ def test_copy_header_files(tmpdir): - api.copy_header_files(tmpdir) + api.copy_header_files(tmpdir, True) def check(name): f = tmpdir.join(name) assert f.check(file=True) From pypy.commits at gmail.com Wed Apr 27 08:49:42 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 05:49:42 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: document the header directory Message-ID: <5720b566.2a18c20a.a67b2.63be@mx.google.com> Author: Matti Picus Branch: cpyext-for-merge Changeset: r83977:7ae32e569be7 Date: 2016-04-27 15:40 +0300 http://bitbucket.org/pypy/pypy/changeset/7ae32e569be7/ Log: document the header directory diff --git a/pypy/module/cpyext/include/numpy/README b/pypy/module/cpyext/include/numpy/README new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/README @@ -0,0 +1,8 @@ +headers for the micronumpy multiarray and umath modules, +as used by https://bitbucket.org/pypy/numpy. They are needed by +downstream packages that depend on numpy, like matplotlib, but can +be slightly non-compatible with traditional numpy C-API use cases. 
+ +The trick to including these headers is in get_include, located in +numpy/lib/utils.py. They will be ignored by an upstream build of numpy +since the /numpy/core/include path will be used instead From pypy.commits at gmail.com Wed Apr 27 08:49:37 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 05:49:37 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: tweak headers for new pypy_numpy.h Message-ID: <5720b561.e109c20a.8d89a.50dd@mx.google.com> Author: mattip Branch: cpyext-for-merge Changeset: r83974:56b8dd0c1546 Date: 2016-04-27 00:24 +0300 http://bitbucket.org/pypy/pypy/changeset/56b8dd0c1546/ Log: tweak headers for new pypy_numpy.h diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h --- a/pypy/module/cpyext/include/numpy/__multiarray_api.h +++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h @@ -5,6 +5,7 @@ npy_bool obval; } PyBoolScalarObject; -#define import_array() -#define PyArray_New _PyArray_New +static int import_array(){}; +static int _import_array(){}; +static int _import_math(){}; diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -10,6 +10,7 @@ extern "C" { #endif +#include "pypy_numpy.h" #include "old_defines.h" #include "npy_common.h" #include "__multiarray_api.h" From pypy.commits at gmail.com Wed Apr 27 08:49:39 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 05:49:39 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: tweak includes to reduce warnings Message-ID: <5720b563.08121c0a.87a39.ffffeae2@mx.google.com> Author: Matti Picus Branch: cpyext-for-merge Changeset: r83975:dcf60419c6be Date: 2016-04-27 15:31 +0300 http://bitbucket.org/pypy/pypy/changeset/dcf60419c6be/ Log: tweak includes to reduce warnings diff --git 
a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h --- a/pypy/module/cpyext/include/numpy/__multiarray_api.h +++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h @@ -5,7 +5,7 @@ npy_bool obval; } PyBoolScalarObject; -static int import_array(){}; -static int _import_array(){}; -static int _import_math(){}; +static int import_array(){return 0;}; +static int _import_array(){return 0;}; +static int _import_math(){return 0;}; diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -201,7 +201,6 @@ #define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto #define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) From pypy.commits at gmail.com Wed Apr 27 08:49:44 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 05:49:44 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: merge cpyext-for-merge back into branch Message-ID: <5720b568.d3161c0a.eb71.ffffea06@mx.google.com> Author: Matti Picus Branch: cpyext-ext Changeset: r83978:181c8b1f5467 Date: 2016-04-27 15:45 +0300 http://bitbucket.org/pypy/pypy/changeset/181c8b1f5467/ Log: merge cpyext-for-merge back into branch diff too long, truncating to 2000 out of 6948 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -20,3 +20,4 @@ 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1 +3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1 diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -5,15 +5,23 @@ overly detailed -1. check out the branch vendor/stdlib +0. 
make sure your working dir is clean +1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k) + or create branch vendor/stdlib-3-* 2. upgrade the files there + 2a. remove lib-python/2.7/ or lib-python/3/ + 2b. copy the files from the cpython repo + 2c. hg add lib-python/2.7/ or lib-python/3/ + 2d. hg remove --after + 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'` + 2f. fix copies / renames manually by running `hg copy --after ` for each copied file 3. update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit -5. update to default/py3k +5. update to default / py3k 6. create a integration branch for the new stdlib (just hg branch stdlib-$version) -7. merge vendor/stdlib +7. merge vendor/stdlib or vendor/stdlib-3-* 8. commit 10. fix issues 11. commit --close-branch -12. merge to default +12. merge to default / py3k diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -51,6 +51,8 @@ # if log is not opened, open it now if not _S_log_open: openlog() + if isinstance(message, unicode): + message = str(message) lib.syslog(priority, "%s", message) @builtinify diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -204,15 +204,6 @@ BoolOption("withstrbuf", "use strings optimized for addition (ver 2)", default=False), - BoolOption("withprebuiltchar", - "use prebuilt single-character string objects", - default=False), - - BoolOption("sharesmallstr", - "always reuse the prebuilt string objects " - "(the empty string and potentially single-char strings)", - default=False), - BoolOption("withspecialisedtuple", "use specialised tuples", default=False), @@ -222,39 +213,14 @@ default=False, requires=[("objspace.honor__builtins__", False)]), - BoolOption("withmapdict", - "make instances really small but slow without the JIT", - default=False, - 
requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withtypeversion", True), - ]), - - BoolOption("withrangelist", - "enable special range list implementation that does not " - "actually create the full list until the resulting " - "list is mutated", - default=False), BoolOption("withliststrategies", "enable optimized ways to store lists of primitives ", default=True), - BoolOption("withtypeversion", - "version type objects when changing them", - cmdline=None, - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), - - BoolOption("withmethodcache", - "try to cache method lookups", - default=False, - requires=[("objspace.std.withtypeversion", True), - ("translation.rweakref", True)]), BoolOption("withmethodcachecounter", "try to cache methods and provide a counter in __pypy__. " "for testing purposes only.", - default=False, - requires=[("objspace.std.withmethodcache", True)]), + default=False), IntOption("methodcachesizeexp", " 2 ** methodcachesizeexp is the size of the of the method cache ", default=11), @@ -265,22 +231,10 @@ BoolOption("optimized_list_getitem", "special case the 'list[integer]' expressions", default=False), - BoolOption("getattributeshortcut", - "track types that override __getattribute__", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), BoolOption("newshortcut", "cache and shortcut calling __new__ from builtin types", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), + default=False), - BoolOption("withidentitydict", - "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), ]), ]) @@ -296,15 +250,10 @@ """ # all the good optimizations for PyPy should be listed here if level in ['2', 
'3', 'jit']: - config.objspace.std.suggest(withrangelist=True) - config.objspace.std.suggest(withmethodcache=True) - config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(intshortcut=True) config.objspace.std.suggest(optimized_list_getitem=True) - config.objspace.std.suggest(getattributeshortcut=True) #config.objspace.std.suggest(newshortcut=True) config.objspace.std.suggest(withspecialisedtuple=True) - config.objspace.std.suggest(withidentitydict=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) @@ -317,16 +266,13 @@ # memory-saving optimizations if level == 'mem': config.objspace.std.suggest(withprebuiltint=True) - config.objspace.std.suggest(withrangelist=True) - config.objspace.std.suggest(withprebuiltchar=True) - config.objspace.std.suggest(withmapdict=True) + config.objspace.std.suggest(withliststrategies=True) if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) - config.objspace.std.suggest(withmapdict=True) def enable_allworkingmodules(config): diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -11,12 +11,6 @@ assert conf.objspace.usemodules.gc - conf.objspace.std.withmapdict = True - assert conf.objspace.std.withtypeversion - conf = get_pypy_config() - conf.objspace.std.withtypeversion = False - py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") - def test_conflicting_gcrootfinder(): conf = get_pypy_config() conf.translation.gc = "boehm" @@ -47,18 +41,10 @@ def test_set_pypy_opt_level(): conf = get_pypy_config() set_pypy_opt_level(conf, '2') - assert conf.objspace.std.getattributeshortcut + assert conf.objspace.std.intshortcut conf = get_pypy_config() set_pypy_opt_level(conf, '0') - assert not conf.objspace.std.getattributeshortcut - -def 
test_rweakref_required(): - conf = get_pypy_config() - conf.translation.rweakref = False - set_pypy_opt_level(conf, '3') - - assert not conf.objspace.std.withtypeversion - assert not conf.objspace.std.withmethodcache + assert not conf.objspace.std.intshortcut def test_check_documentation(): def check_file_exists(fn): diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -102,15 +102,15 @@ apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ - tk-dev + tk-dev libgc-dev For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. On Fedora:: - yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ - lib-sqlite3-devel ncurses-devel expat-devel openssl-devel - (XXX plus the Febora version of libgdbm-dev and tk-dev) + dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ + lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \ + gdbm-devel For the optional lzma module on PyPy3 you will also need ``xz-devel``. diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.getattributeshortcut.txt +++ /dev/null @@ -1,1 +0,0 @@ -Performance only: track types that override __getattribute__. diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt --- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt +++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt @@ -1,1 +1,1 @@ -Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`. +Set the cache size (number of entries) for the method cache. 
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withidentitydict.txt +++ /dev/null @@ -1,21 +0,0 @@ -============================= -objspace.std.withidentitydict -============================= - -* **name:** withidentitydict - -* **description:** enable a dictionary strategy for "by identity" comparisons - -* **command-line:** --objspace-std-withidentitydict - -* **command-line for negation:** --no-objspace-std-withidentitydict - -* **option type:** boolean option - -* **default:** True - - -Enable a dictionary strategy specialized for instances of classes which -compares "by identity", which is the default unless you override ``__hash__``, -``__eq__`` or ``__cmp__``. This strategy will be used only with new-style -classes. diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmapdict.txt +++ /dev/null @@ -1,5 +0,0 @@ -Enable the new version of "sharing dictionaries". - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmethodcache.txt +++ /dev/null @@ -1,2 +0,0 @@ -Enable method caching. See the section "Method Caching" in `Standard -Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__. 
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt --- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt +++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt @@ -1,1 +1,1 @@ -Testing/debug option for :config:`objspace.std.withmethodcache`. +Testing/debug option for the method cache. diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt deleted file mode 100644 diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withrangelist.txt +++ /dev/null @@ -1,11 +0,0 @@ -Enable "range list" objects. They are an additional implementation of the Python -``list`` type, indistinguishable for the normal user. Whenever the ``range`` -builtin is called, an range list is returned. As long as this list is not -mutated (and for example only iterated over), it uses only enough memory to -store the start, stop and step of the range. This makes using ``range`` as -efficient as ``xrange``, as long as the result is only used in a ``for``-loop. - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists - diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withtypeversion.txt +++ /dev/null @@ -1,6 +0,0 @@ -This (mostly internal) option enables "type versions": Every type object gets an -(only internally visible) version that is updated when the type's dict is -changed. This is e.g. used for invalidating caches. It does not make sense to -enable this option alone. - -.. 
internal diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -62,29 +62,37 @@ Dictionary Optimizations ~~~~~~~~~~~~~~~~~~~~~~~~ -Multi-Dicts -+++++++++++ +Dict Strategies +++++++++++++++++ -Multi-dicts are a special implementation of dictionaries. It became clear that -it is very useful to *change* the internal representation of an object during -its lifetime. Multi-dicts are a general way to do that for dictionaries: they -provide generic support for the switching of internal representations for -dicts. +Dict strategies are an implementation approach for dictionaries (and lists) +that make it possible to use a specialized representation of the dictionary's +data, while still being able to switch back to a general representation should +that become necessary later. -If you just enable multi-dicts, special representations for empty dictionaries, -for string-keyed dictionaries. In addition there are more specialized dictionary -implementations for various purposes (see below). +Dict strategies are always enabled, by default there are special strategies for +dicts with just string keys, just unicode keys and just integer keys. If one of +those specialized strategies is used, then dict lookup can use much faster +hashing and comparison for the dict keys. There is of course also a strategy +for general keys. -This is now the default implementation of dictionaries in the Python interpreter. +Identity Dicts ++++++++++++++++ -Sharing Dicts +We also have a strategy specialized for keys that are instances of classes +which compares "by identity", which is the default unless you override +``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with +new-style classes. + + +Map Dicts +++++++++++++ -Sharing dictionaries are a special representation used together with multidicts. 
-This dict representation is used only for instance dictionaries and tries to -make instance dictionaries use less memory (in fact, in the ideal case the -memory behaviour should be mostly like that of using __slots__). +Map dictionaries are a special representation used together with dict strategies. +This dict strategy is used only for instance dictionaries and tries to +make instance dictionaries use less memory (in fact, usually memory behaviour +should be mostly like that of using ``__slots__``). The idea is the following: Most instances of the same class have very similar attributes, and are even adding these keys to the dictionary in the same order @@ -95,8 +103,6 @@ dicts: the representation of the instance dict contains only a list of values. -A more advanced version of sharing dicts, called *map dicts,* is available -with the :config:`objspace.std.withmapdict` option. List Optimizations @@ -114,8 +120,8 @@ created. This gives the memory and speed behaviour of ``xrange`` and the generality of use of ``range``, and makes ``xrange`` essentially useless. -You can enable this feature with the :config:`objspace.std.withrangelist` -option. +This feature is enabled by default as part of the +:config:`objspace.std.withliststrategies` option. User Class Optimizations @@ -133,8 +139,7 @@ base classes is changed). On subsequent lookups the cached version can be used, as long as the instance did not shadow any of its classes attributes. -You can enable this feature with the :config:`objspace.std.withmethodcache` -option. +This feature is enabled by default. Interpreter Optimizations diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst --- a/pypy/doc/introduction.rst +++ b/pypy/doc/introduction.rst @@ -1,16 +1,22 @@ What is PyPy? ============= -In common parlance, PyPy has been used to mean two things. The first is the -:ref:`RPython translation toolchain `, which is a framework for generating -dynamic programming language implementations. 
And the second is one -particular implementation that is so generated -- -an implementation of the Python_ programming language written in -Python itself. It is designed to be flexible and easy to experiment with. +Historically, PyPy has been used to mean two things. The first is the +:ref:`RPython translation toolchain ` for generating +interpreters for dynamic programming languages. And the second is one +particular implementation of Python_ produced with it. Because RPython +uses the same syntax as Python, this generated version became known as +Python interpreter written in Python. It is designed to be flexible and +easy to experiment with. -This double usage has proven to be confusing, and we are trying to move -away from using the word PyPy to mean both things. From now on we will -try to use PyPy to only mean the Python implementation, and say the +To make it more clear, we start with source code written in RPython, +apply the RPython translation toolchain, and end up with PyPy as a +binary executable. This executable is the Python interpreter. + +Double usage has proven to be confusing, so we've moved away from using +the word PyPy to mean both toolchain and generated interpreter. Now we +use word PyPy to refer to the Python implementation, and explicitly +mention :ref:`RPython translation toolchain ` when we mean the framework. Some older documents, presentations, papers and videos will still have the old diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -3,10 +3,17 @@ ======== We have released PyPy 5.1, about a month after PyPy 5.0. -We encourage all users of PyPy to update to this version. Apart from the usual -bug fixes, there is an ongoing effort to improve the warmup time and memory -usage of JIT-related metadata, and we now fully support the IBM s390x -architecture. + +This release includes more improvement to warmup time and memory +requirements. 
We have seen about a 20% memory requirement reduction and up to +30% warmup time improvement, more detail in the `blog post`_. + +We also now have `fully support for the IBM s390x`_. Since this support is in +`RPython`_, any dynamic language written using RPython, like PyPy, will +automagically be supported on that architecture. + +We updated cffi_ to 1.6, and continue to improve support for the wider +python ecosystem using the PyPy interpreter. You can download the PyPy 5.1 release here: @@ -26,6 +33,9 @@ .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html .. _`numpy`: https://bitbucket.org/pypy/numpy +.. _cffi: https://cffi.readthedocs.org +.. _`fully support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html +.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html What is PyPy? ============= @@ -46,7 +56,7 @@ * big- and little-endian variants of **PPC64** running Linux, - * **s960x** running Linux + * **s390x** running Linux .. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. 
_`dynamic languages`: http://pypyjs.org @@ -74,6 +84,8 @@ * Fix a corner case in the JIT * Fix edge cases in the cpyext refcounting-compatible semantics + (more work on cpyext compatibility is coming in the ``cpyext-ext`` + branch, but isn't ready yet) * Try harder to not emit NEON instructions on ARM processors without NEON support @@ -92,11 +104,17 @@ * Fix sandbox startup (a regression in 5.0) + * Fix possible segfault for classes with mangled mro or __metaclass__ + + * Fix isinstance(deque(), Hashable) on the pure python deque + + * Fix an issue with forkpty() + * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy -* Numpy: +* Numpy_: * Implemented numpy.where for a single argument @@ -108,6 +126,8 @@ functions exported from libpypy.so are declared in pypy_numpy.h, which is included only when building our fork of numpy + * Add broadcast + * Performance improvements: * Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting @@ -119,14 +139,18 @@ * Remove the forced minor collection that occurs when rewriting the assembler at the start of the JIT backend + * Port the resource module to cffi + * Internal refactorings: * Use a simpler logger to speed up translation * Drop vestiges of Python 2.5 support in testing + * Update rpython functions with ones needed for py3k + .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html -.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html +.. _Numpy: https://bitbucket.org/pypy/numpy Please update, and continue to help us make PyPy better. diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst --- a/pypy/doc/whatsnew-5.1.0.rst +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -60,3 +60,13 @@ Remove old uneeded numpy headers, what is left is only for testing. Also generate pypy_numpy.h which exposes functions to directly use micronumpy ndarray and ufuncs + +.. 
branch: rposix-for-3 + +Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). +This updates the underlying rpython functions with the ones needed for the +py3k branch + +.. branch: numpy_broadcast + +Add broadcast to micronumpy diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,14 +3,43 @@ ========================= .. this is a revision shortly after release-5.1 -.. startrev: 2180e1eaf6f6 +.. startrev: aa60332382a1 -.. branch: rposix-for-3 +.. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046 -Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). -This updates the underlying rpython functions with the ones needed for the -py3k branch - -.. branch: numpy_broadcast +.. branch: gcheader-decl -Add broadcast to micronumpy +Reduce the size of generated C sources. + + +.. branch: remove-objspace-options + +Remove a number of options from the build process that were never tested and +never set. Fix a performance bug in the method cache. + +.. branch: bitstring + +JIT: use bitstrings to compress the lists of read or written descrs +that we attach to EffectInfo. Fixes a problem we had in +remove-objspace-options. + +.. 
branch: cpyext-for-merge +Update cpyext C-API support: + - allow c-snippet tests to be run with -A so we can verify we are compatible + - fix many edge cases exposed by fixing tests to run with -A + - issequence() logic matches cpython + - make PyStringObject and PyUnicodeObject field names compatible with cpython + - add prelminary support for PyDateTime_* + - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy, + PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile, + - PyAnySet_CheckExact, PyUnicode_Concat + - improve support for PyGILState_Ensure, PyGILState_Release, and thread + primitives, also find a case where CPython will allow thread creation + before PyEval_InitThreads is run, dissallow on PyPy + - create a PyObject-specific list strategy + - rewrite slot assignment for typeobjects + - improve tracking of PyObject to rpython object mapping + - support tp_as_{number, sequence, mapping, buffer} slots +After this branch, we are almost able to support upstream numpy via cpyext, so +we created (yet another) fork of numpy at github.com/pypy/numpy with the needed +changes diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -214,6 +214,7 @@ self._trace(frame, 'exception', None, operationerr) #operationerr.print_detailed_traceback(self.space) + @jit.dont_look_inside @specialize.arg(1) def sys_exc_info(self, for_hidden=False): """Implements sys.exc_info(). @@ -225,15 +226,7 @@ # NOTE: the result is not the wrapped sys.exc_info() !!! 
""" - frame = self.gettopframe() - while frame: - if frame.last_exception is not None: - if ((for_hidden or not frame.hide()) or - frame.last_exception is - get_cleared_operation_error(self.space)): - return frame.last_exception - frame = frame.f_backref() - return None + return self.gettopframe()._exc_info_unroll(self.space, for_hidden) def set_sys_exc_info(self, operror): frame = self.gettopframe_nohidden() diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -114,6 +114,7 @@ e.write_unraisable(self.space, "new_code_hook()") def _initialize(self): + from pypy.objspace.std.mapdict import init_mapdict_cache if self.co_cellvars: argcount = self.co_argcount assert argcount >= 0 # annotator hint @@ -149,9 +150,7 @@ self._compute_flatcall() - if self.space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import init_mapdict_cache - init_mapdict_cache(self) + init_mapdict_cache(self) def _init_ready(self): "This is a hook for the vmprof module, which overrides this method." @@ -163,7 +162,10 @@ # When translating PyPy, freeze the file name # /lastdirname/basename.py # instead of freezing the complete translation-time path. 
- filename = self.co_filename.lstrip('<').rstrip('>') + filename = self.co_filename + if filename.startswith(''): + return + filename = filename.lstrip('<').rstrip('>') if filename.lower().endswith('.pyc'): filename = filename[:-1] basename = os.path.basename(filename) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized, check_nonneg from rpython.rlib.jit import hint -from rpython.rlib.objectmodel import we_are_translated, instantiate +from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint from rpython.tool.pairtype import extendabletype @@ -12,7 +12,8 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.astcompiler import consts from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import ( + OperationError, get_cleared_operation_error, oefmt) from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.nestedscope import Cell from pypy.tool import stdlib_opcode @@ -870,6 +871,22 @@ return space.wrap(self.builtin is not space.builtin) return space.w_False + @jit.unroll_safe + @specialize.arg(2) + def _exc_info_unroll(self, space, for_hidden=False): + """Return the most recent OperationError being handled in the + call stack + """ + frame = self + while frame: + last = frame.last_exception + if last is not None: + if last is get_cleared_operation_error(self.space): + break + if for_hidden or not frame.hide(): + return last + frame = frame.f_backref() + return None # ____________________________________________________________ diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -739,25 
+739,16 @@ unroller = SContinueLoop(startofloop) return self.unrollstack_and_jump(unroller) - @jit.unroll_safe def RAISE_VARARGS(self, nbargs, next_instr): space = self.space if nbargs == 0: - frame = self - while frame: - if frame.last_exception is not None: - operror = frame.last_exception - break - frame = frame.f_backref() - else: - raise OperationError(space.w_TypeError, - space.wrap("raise: no active exception to re-raise")) - if operror.w_type is space.w_None: - raise OperationError(space.w_TypeError, - space.wrap("raise: the exception to re-raise was cleared")) + last_operr = self._exc_info_unroll(space, for_hidden=True) + if last_operr is None: + raise oefmt(space.w_TypeError, + "No active exception to reraise") # re-raise, no new traceback obj will be attached - self.last_exception = operror - raise RaiseWithExplicitTraceback(operror) + self.last_exception = last_operr + raise RaiseWithExplicitTraceback(last_operr) w_value = w_traceback = space.w_None if nbargs >= 3: @@ -951,8 +942,7 @@ def LOAD_ATTR(self, nameindex, next_instr): "obj.attributename" w_obj = self.popvalue() - if (self.space.config.objspace.std.withmapdict - and not jit.we_are_jitted()): + if not jit.we_are_jitted(): from pypy.objspace.std.mapdict import LOAD_ATTR_caching w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex) else: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -98,175 +98,51 @@ # reason is that it is missing a place to store the __dict__, the slots, # the weakref lifeline, and it typically has no interp-level __del__. # So we create a few interp-level subclasses of W_XxxObject, which add -# some combination of features. 
-# -# We don't build 2**4 == 16 subclasses for all combinations of requested -# features, but limit ourselves to 6, chosen a bit arbitrarily based on -# typical usage (case 1 is the most common kind of app-level subclasses; -# case 2 is the memory-saving kind defined with __slots__). -# -# +----------------------------------------------------------------+ -# | NOTE: if withmapdict is enabled, the following doesn't apply! | -# | Map dicts can flexibly allow any slots/__dict__/__weakref__ to | -# | show up only when needed. In particular there is no way with | -# | mapdict to prevent some objects from being weakrefable. | -# +----------------------------------------------------------------+ -# -# dict slots del weakrefable -# -# 1. Y N N Y UserDictWeakref -# 2. N Y N N UserSlots -# 3. Y Y N Y UserDictWeakrefSlots -# 4. N Y N Y UserSlotsWeakref -# 5. Y Y Y Y UserDictWeakrefSlotsDel -# 6. N Y Y Y UserSlotsWeakrefDel -# -# Note that if the app-level explicitly requests no dict, we should not -# provide one, otherwise storing random attributes on the app-level -# instance would unexpectedly work. We don't care too much, though, if -# an object is weakrefable when it shouldn't really be. It's important -# that it has a __del__ only if absolutely needed, as this kills the -# performance of the GCs. -# -# Interp-level inheritance is like this: -# -# W_XxxObject base -# / \ -# 1 2 -# / \ -# 3 4 -# / \ -# 5 6 +# some combination of features. This is done using mapdict. -def get_unique_interplevel_subclass(config, cls, hasdict, wants_slots, - needsdel=False, weakrefable=False): +# we need two subclasses of the app-level type, one to add mapdict, and then one +# to add del to not slow down the GC. 
+ +def get_unique_interplevel_subclass(config, cls, needsdel=False): "NOT_RPYTHON: initialization-time only" if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False): needsdel = False assert cls.typedef.acceptable_as_base_class - key = config, cls, hasdict, wants_slots, needsdel, weakrefable + key = config, cls, needsdel try: return _subclass_cache[key] except KeyError: - subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel, - weakrefable) + # XXX can save a class if cls already has a __del__ + if needsdel: + cls = get_unique_interplevel_subclass(config, cls, False) + subcls = _getusercls(config, cls, needsdel) assert key not in _subclass_cache _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable): +def _getusercls(config, cls, wants_del, reallywantdict=False): + from rpython.rlib import objectmodel + from pypy.objspace.std.mapdict import (BaseUserClassMapdict, + MapdictDictSupport, MapdictWeakrefSupport, + _make_storage_mixin_size_n) typedef = cls.typedef - if wants_dict and typedef.hasdict: - wants_dict = False - if config.objspace.std.withmapdict and not typedef.hasdict: - # mapdict only works if the type does not already have a dict - if wants_del: - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - return _usersubclswithfeature(config, cls, "user", "dict", "weakref", "slots") - # Forest of if's - see the comment above. 
+ name = cls.__name__ + "User" + + mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()] + if reallywantdict or not typedef.hasdict: + # the type has no dict, mapdict to provide the dict + mixins_needed.append(MapdictDictSupport) + name += "Dict" + if not typedef.weakrefable: + # the type does not support weakrefs yet, mapdict to provide weakref + # support + mixins_needed.append(MapdictWeakrefSupport) + name += "Weakrefable" if wants_del: - if wants_dict: - # case 5. Parent class is 3. - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - else: - # case 6. Parent class is 4. - parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - elif wants_dict: - if wants_slots: - # case 3. Parent class is 1. - parentcls = get_unique_interplevel_subclass(config, cls, True, False, - False, True) - return _usersubclswithfeature(config, parentcls, "slots") - else: - # case 1 (we need to add weakrefable unless it's already in 'cls') - if not typedef.weakrefable: - return _usersubclswithfeature(config, cls, "user", "dict", "weakref") - else: - return _usersubclswithfeature(config, cls, "user", "dict") - else: - if weakrefable and not typedef.weakrefable: - # case 4. Parent class is 2. 
- parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, False) - return _usersubclswithfeature(config, parentcls, "weakref") - else: - # case 2 (if the base is already weakrefable, case 2 == case 4) - return _usersubclswithfeature(config, cls, "user", "slots") - -def _usersubclswithfeature(config, parentcls, *features): - key = config, parentcls, features - try: - return _usersubclswithfeature_cache[key] - except KeyError: - subcls = _builduserclswithfeature(config, parentcls, *features) - _usersubclswithfeature_cache[key] = subcls - return subcls -_usersubclswithfeature_cache = {} -_allusersubcls_cache = {} - -def _builduserclswithfeature(config, supercls, *features): - "NOT_RPYTHON: initialization-time only" - name = supercls.__name__ - name += ''.join([name.capitalize() for name in features]) - body = {} - #print '..........', name, '(', supercls.__name__, ')' - - def add(Proto): - for key, value in Proto.__dict__.items(): - if (not key.startswith('__') and not key.startswith('_mixin_') - or key == '__del__'): - if hasattr(value, "func_name"): - value = func_with_new_name(value, value.func_name) - body[key] = value - - if (config.objspace.std.withmapdict and "dict" in features): - from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin - add(BaseMapdictObject) - add(ObjectMixin) - body["user_overridden_class"] = True - features = () - - if "user" in features: # generic feature needed by all subcls - - class Proto(object): - user_overridden_class = True - - def getclass(self, space): - return promote(self.w__class__) - - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - - def user_setup(self, space, w_subtype): - self.space = space - self.w__class__ = w_subtype - self.user_setup_slots(w_subtype.layout.nslots) - - def user_setup_slots(self, nslots): - assert nslots == 0 - add(Proto) - - if "weakref" in features: - class Proto(object): - _lifeline_ = None - def 
getweakref(self): - return self._lifeline_ - def setweakref(self, space, weakreflifeline): - self._lifeline_ = weakreflifeline - def delweakref(self): - self._lifeline_ = None - add(Proto) - - if "del" in features: - parent_destructor = getattr(supercls, '__del__', None) + name += "Del" + parent_destructor = getattr(cls, '__del__', None) def call_parent_del(self): assert isinstance(self, subcls) parent_destructor(self) @@ -281,57 +157,16 @@ if parent_destructor is not None: self.enqueue_for_destruction(self.space, call_parent_del, 'internal destructor of ') - add(Proto) + mixins_needed.append(Proto) - if "slots" in features: - class Proto(object): - slots_w = [] - def user_setup_slots(self, nslots): - if nslots > 0: - self.slots_w = [None] * nslots - def setslotvalue(self, index, w_value): - self.slots_w[index] = w_value - def delslotvalue(self, index): - if self.slots_w[index] is None: - return False - self.slots_w[index] = None - return True - def getslotvalue(self, index): - return self.slots_w[index] - add(Proto) - - if "dict" in features: - base_user_setup = supercls.user_setup.im_func - if "user_setup" in body: - base_user_setup = body["user_setup"] - class Proto(object): - def getdict(self, space): - return self.w__dict__ - - def setdict(self, space, w_dict): - self.w__dict__ = check_new_dictionary(space, w_dict) - - def user_setup(self, space, w_subtype): - self.w__dict__ = space.newdict( - instance=True) - base_user_setup(self, space, w_subtype) - - add(Proto) - - subcls = type(name, (supercls,), body) - _allusersubcls_cache[subcls] = True + class subcls(cls): + user_overridden_class = True + for base in mixins_needed: + objectmodel.import_from_mixin(base) + del subcls.base + subcls.__name__ = name return subcls -# a couple of helpers for the Proto classes above, factored out to reduce -# the translated code size -def check_new_dictionary(space, w_dict): - if not space.isinstance_w(w_dict, space.w_dict): - raise OperationError(space.w_TypeError, - 
space.wrap("setting dictionary to a non-dict")) - from pypy.objspace.std import dictmultiobject - assert isinstance(w_dict, dictmultiobject.W_DictMultiObject) - return w_dict -check_new_dictionary._dont_inline_ = True # ____________________________________________________________ diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -87,7 +87,7 @@ howmany = get_len_of_range(space, start, stop, step) - if space.config.objspace.std.withrangelist: + if space.config.objspace.std.withliststrategies: return range_withspecialized_implementation(space, start, step, howmany) res_w = [None] * howmany @@ -99,7 +99,7 @@ def range_withspecialized_implementation(space, start, step, length): - assert space.config.objspace.std.withrangelist + assert space.config.objspace.std.withliststrategies from pypy.objspace.std.listobject import make_range_list return make_range_list(space, start, step, length) diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -185,12 +185,19 @@ class Cache: def __init__(self, space): - from pypy.interpreter.typedef import _usersubclswithfeature - # evil - self.cls_without_del = _usersubclswithfeature( - space.config, W_InstanceObject, "dict", "weakref") - self.cls_with_del = _usersubclswithfeature( - space.config, self.cls_without_del, "del") + from pypy.interpreter.typedef import _getusercls + + if hasattr(space, 'is_fake_objspace'): + # hack: with the fake objspace, we don't want to see typedef's + # _getusercls() at all + self.cls_without_del = W_InstanceObject + self.cls_with_del = W_InstanceObject + return + + self.cls_without_del = _getusercls( + space.config, W_InstanceObject, False, reallywantdict=True) + self.cls_with_del = _getusercls( + space.config, W_InstanceObject, 
True, reallywantdict=True) def class_descr_call(space, w_self, __args__): diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -748,10 +748,6 @@ raises(TypeError, delattr, A(), 42) -class AppTestGetattrWithGetAttributeShortcut(AppTestGetattr): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - class TestInternal: def test_execfile(self, space): fn = str(udir.join('test_execfile')) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1118,8 +1118,7 @@ assert getattr(c, u"x") == 1 -class AppTestOldStyleMapDict(AppTestOldstyle): - spaceconfig = {"objspace.std.withmapdict": True} +class AppTestOldStyleMapDict: def setup_class(cls): if cls.runappdirect: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -110,9 +110,8 @@ 'interp_magic.method_cache_counter') self.extra_interpdef('reset_method_cache_counter', 'interp_magic.reset_method_cache_counter') - if self.space.config.objspace.std.withmapdict: - self.extra_interpdef('mapdict_cache_counter', - 'interp_magic.mapdict_cache_counter') + self.extra_interpdef('mapdict_cache_counter', + 'interp_magic.mapdict_cache_counter') PYC_MAGIC = get_pyc_magic(self.space) self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC) try: diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -37,17 +37,15 @@ cache = space.fromcache(MethodCache) cache.misses = {} cache.hits = {} - if space.config.objspace.std.withmapdict: - cache = space.fromcache(MapAttrCache) - cache.misses = 
{} - cache.hits = {} + cache = space.fromcache(MapAttrCache) + cache.misses = {} + cache.hits = {} @unwrap_spec(name=str) def mapdict_cache_counter(space, name): """Return a tuple (index_cache_hits, index_cache_misses) for lookups in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter - assert space.config.objspace.std.withmapdict cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -1,8 +1,7 @@ import py class AppTest(object): - spaceconfig = {"objspace.usemodules.select": False, - "objspace.std.withrangelist": True} + spaceconfig = {"objspace.usemodules.select": False} def setup_class(cls): if cls.runappdirect: @@ -61,6 +60,7 @@ import __pypy__ import sys + result = [False] @__pypy__.hidden_applevel def test_hidden_with_tb(): def not_hidden(): 1/0 @@ -69,9 +69,11 @@ assert sys.exc_info() == (None, None, None) tb = __pypy__.get_hidden_tb() assert tb.tb_frame.f_code.co_name == 'not_hidden' - return True + result[0] = True + raise else: return False - assert test_hidden_with_tb() + raises(ZeroDivisionError, test_hidden_with_tb) + assert result[0] def test_lookup_special(self): from __pypy__ import lookup_special diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -46,6 +46,7 @@ '_get_types': 'func._get_types', '_get_common_types': 'func._get_common_types', 'from_buffer': 'func.from_buffer', + 'gcp': 'func.gcp', 'string': 'func.string', 'unpack': 'func.unpack', diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- 
a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -1773,14 +1773,14 @@ def test_introspect_order(self): ffi, lib = self.prepare(""" - union aaa { int a; }; typedef struct ccc { int a; } b; - union g { int a; }; typedef struct cc { int a; } bbb; - union aa { int a; }; typedef struct a { int a; } bb; + union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb; + union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb; + union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb; """, "test_introspect_order", """ - union aaa { int a; }; typedef struct ccc { int a; } b; - union g { int a; }; typedef struct cc { int a; } bbb; - union aa { int a; }; typedef struct a { int a; } bb; + union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb; + union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb; + union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb; """) - assert ffi.list_types() == (['b', 'bb', 'bbb'], - ['a', 'cc', 'ccc'], - ['aa', 'aaa', 'g']) + assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'], + ['CFFIa', 'CFFIcc', 'CFFIccc'], + ['CFFIaa', 'CFFIaaa', 'CFFIg']) diff --git a/pypy/module/_cffi_backend/wrapper.py b/pypy/module/_cffi_backend/wrapper.py --- a/pypy/module/_cffi_backend/wrapper.py +++ b/pypy/module/_cffi_backend/wrapper.py @@ -92,7 +92,8 @@ return ctype._call(self.fnptr, args_w) def descr_repr(self, space): - return space.wrap("" % (self.fnname,)) + doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname) + return space.wrap("" % (doc,)) def descr_get_doc(self, space): doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -150,7 +150,7 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir): +def copy_header_files(dstdir, 
copy_numpy_headers): # XXX: 20 lines of code to recursively copy a directory, really?? assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') @@ -158,6 +158,18 @@ headers.append(udir.join(name)) _copy_header_files(headers, dstdir) + if copy_numpy_headers: + try: + dstdir.mkdir('numpy') + except py.error.EEXIST: + pass + numpy_dstdir = dstdir / 'numpy' + + numpy_include_dir = include_dir / 'numpy' + numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') + _copy_header_files(numpy_headers, numpy_dstdir) + + class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -1345,7 +1357,7 @@ setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include) + copy_header_files(trunk_include, use_micronumpy) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) diff --git a/pypy/module/cpyext/include/listobject.h b/pypy/module/cpyext/include/listobject.h --- a/pypy/module/cpyext/include/listobject.h +++ b/pypy/module/cpyext/include/listobject.h @@ -1,2 +1,1 @@ #define PyList_GET_ITEM PyList_GetItem -#define PyList_SET_ITEM PyList_SetItem diff --git a/pypy/module/cpyext/include/numpy/README b/pypy/module/cpyext/include/numpy/README new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/README @@ -0,0 +1,8 @@ +headers for the micronumpy multiarray and umath modules, +as used by https://bitbucket.org/pypy/numpy. They are needed by +downstream packages that depend on numpy, like matplotlib, but can +be slightly non-compatible with traditional numpy C-API use cases. + +The trick to including these headers is in get_include, located in +numpy/lib/utils.py. 
They will be ignored by an upstream build of numpy +since the /numpy/core/include path will be used instead diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h @@ -0,0 +1,11 @@ + + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +static int import_array(){return 0;}; +static int _import_array(){return 0;}; +static int _import_math(){return 0;}; + diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -1,6 +1,8 @@ -/* NDArray object interface - S. H. Muller, 2013/07/26 */ -/* For testing ndarrayobject only */ +/* NDArray object interface - S. H. Muller, 2013/07/26 + * It will be copied by numpy/core/setup.py by install_data to + * site-packages/numpy/core/includes/numpy +*/ #ifndef Py_NDARRAYOBJECT_H #define Py_NDARRAYOBJECT_H @@ -8,8 +10,14 @@ extern "C" { #endif +#include "pypy_numpy.h" +#include "old_defines.h" #include "npy_common.h" -#include "ndarraytypes.h" +#include "__multiarray_api.h" + +#define NPY_UNUSED(x) x +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -17,18 +25,206 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define NPY_MAXDIMS 32 -/* functions defined in ndarrayobject.c*/ +#ifndef NDARRAYTYPES_H +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +/* data types copied from numpy/ndarraytypes.h + * keep numbers in sync with micronumpy.interp_dtype.DTypeCache + */ +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, 
NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. + */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) +#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) +#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) +#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) + + +/* flags */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 +#define NPY_ARRAY_OWNDATA 0x0004 +#define NPY_ARRAY_FORCECAST 0x0010 +#define NPY_ARRAY_ENSURECOPY 0x0020 +#define NPY_ARRAY_ENSUREARRAY 0x0040 +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 +#define NPY_ARRAY_ALIGNED 0x0100 +#define NPY_ARRAY_NOTSWAPPED 0x0200 +#define NPY_ARRAY_WRITEABLE 0x0400 +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define 
NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +#define NPY_FARRAY NPY_ARRAY_FARRAY +#define NPY_CARRAY NPY_ARRAY_CARRAY + +#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + +#define PyArray_ISONESEGMENT(arr) (1) +#define PyArray_ISNOTSWAPPED(arr) (1) +#define PyArray_ISBYTESWAPPED(arr) (0) + +#endif + +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT +#define NPY_INT32 NPY_INT +#define NPY_UINT32 
NPY_UINT +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX32 NPY_CFLOAT +#define NPY_COMPLEX64 NPY_CDOUBLE + + +/* functions */ +#ifndef PyArray_NDIM + +#define PyArray_Check _PyArray_Check +#define PyArray_CheckExact _PyArray_CheckExact +#define PyArray_FLAGS _PyArray_FLAGS + +#define PyArray_NDIM _PyArray_NDIM +#define PyArray_DIM _PyArray_DIM +#define PyArray_STRIDE _PyArray_STRIDE +#define PyArray_SIZE _PyArray_SIZE +#define PyArray_ITEMSIZE _PyArray_ITEMSIZE +#define PyArray_NBYTES _PyArray_NBYTES +#define PyArray_TYPE _PyArray_TYPE +#define PyArray_DATA _PyArray_DATA + +#define PyArray_Size PyArray_SIZE +#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) + +#define PyArray_FromAny _PyArray_FromAny +#define PyArray_FromObject _PyArray_FromObject +#define PyArray_ContiguousFromObject PyArray_FromObject +#define PyArray_ContiguousFromAny PyArray_FromObject + +#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) +#define PyArray_FROM_OTF(obj, typenum, requirements) \ + PyArray_FromObject(obj, typenum, 0, 0) + +#define PyArray_New _PyArray_New +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData +#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning + +#define PyArray_EMPTY(nd, dims, type_num, fortran) \ + PyArray_SimpleNew(nd, dims, type_num) PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) + +/* Don't use these in loops! 
*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0))) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1))) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1) + \ + (k)*PyArray_STRIDE(obj,2))) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1) + \ + (k)*PyArray_STRIDE(obj,2) + \ + (l)*PyArray_STRIDE(obj,3))) + +#endif #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h --- a/pypy/module/cpyext/include/numpy/ndarraytypes.h +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -1,9 +1,69 @@ #ifndef NDARRAYTYPES_H #define NDARRAYTYPES_H -/* For testing ndarrayobject only */ +#include "numpy/npy_common.h" +//#include "npy_endian.h" +//#include "npy_cpu.h" +//#include "utils.h" -#include "numpy/npy_common.h" +//for pypy - numpy has lots of typedefs +//for pypy - make life easier, less backward support +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION +#undef NPY_1_8_API_VERSION + +#define NPY_ENABLE_SEPARATE_COMPILATION 1 +#define NPY_VISIBILITY_HIDDEN + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN +#else + #define NPY_NO_EXPORT static +#endif + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + + + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. 
+ * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version. This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. 
+ */ +#define NPY_FEATURE_VERSION NPY_API_VERSION enum NPY_TYPES { NPY_BOOL=0, NPY_BYTE, NPY_UBYTE, @@ -31,6 +91,18 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + /* * These characters correspond to the array type and the struct * module @@ -85,6 +157,27 @@ }; typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2 +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_MERGESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0, +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { NPY_NOSCALAR=-1, NPY_BOOL_SCALAR, NPY_INTPOS_SCALAR, @@ -93,6 +186,7 @@ NPY_COMPLEX_SCALAR, NPY_OBJECT_SCALAR } NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) /* For specifying array memory layout or iteration order */ typedef enum { @@ -106,6 +200,729 @@ NPY_KEEPORDER=2 } NPY_ORDER; +/* For specifying allowed casting in operations which support it */ +typedef enum { + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, + + /* + * Temporary internal definition only, will be removed in upcoming + * release, see below + * */ + NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length 
of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) + +typedef enum { + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10,/* nanoseconds */ + NPY_FR_ps = 11,/* picoseconds */ + NPY_FR_fs = 12,/* femtoseconds */ + NPY_FR_as = 13,/* attoseconds */ + NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. */ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. 
*/ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. + ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + + /* + * Macros to define how array, and dimension/strides data is + * allocated. 
+ */ + + /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ + +#define NPY_USE_PYMEM 1 + +#if NPY_USE_PYMEM == 1 +#define PyArray_malloc PyMem_Malloc +#define PyArray_free PyMem_Free +#define PyArray_realloc PyMem_Realloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. 
Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL From pypy.commits at gmail.com Wed Apr 27 08:49:46 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 05:49:46 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: revert skipped tests from merge Message-ID: <5720b56a.4ea81c0a.8139a.ffffba69@mx.google.com> Author: Matti Picus Branch: cpyext-ext Changeset: r83979:460c0e1dc5ef Date: 2016-04-27 13:00 +0300 http://bitbucket.org/pypy/pypy/changeset/460c0e1dc5ef/ Log: revert skipped tests from merge diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -123,7 +123,6 @@ return result; ''') assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') - skip('PyByteArrayObject not implemented yet') assert 'foo\0bar\0baz' == pybuffer(bytearray('foo\0bar\0baz')) diff --git a/pypy/module/cpyext/test/test_typeobject.py 
b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -4,7 +4,6 @@ from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.module.cpyext.typeobject import PyTypeObjectPtr -import pytest import sys class AppTestTypeObject(AppTestCpythonExtensionBase): @@ -123,7 +122,6 @@ obj = module.fooType.classmeth() assert obj is module.fooType - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='cpython segfaults') def test_new(self): # XXX cpython segfaults but if run singly (with -k test_new) this passes module = self.import_module(name='foo') @@ -178,8 +176,6 @@ x = module.MetaType('name', (), {}) assert isinstance(x, type) assert isinstance(x, module.MetaType) - if self.runappdirect and '__pypy__' in sys.builtin_module_names: - skip('x is not callable when runappdirect??') x() def test_metaclass_compatible(self): @@ -189,17 +185,15 @@ assert type(module.fooType).__mro__ == (type, object) y = module.MetaType('other', (module.MetaType,), {}) assert isinstance(y, module.MetaType) - if self.runappdirect and '__pypy__' in sys.builtin_module_names: - skip('y is not callable when runappdirect??') x = y('something', (type(y),), {}) del x, y def test_metaclass_compatible2(self): - skip('type.__new__ does not check acceptable_as_base_class') # XXX FIX - must raise since fooType (which is a base type) # does not have flag Py_TPFLAGS_BASETYPE module = self.import_module(name='foo') raises(TypeError, module.MetaType, 'other', (module.fooType,), {}) + def test_sre(self): import sys for m in ['_sre', 'sre_compile', 'sre_constants', 'sre_parse', 're']: @@ -891,7 +885,6 @@ #print('calling module.footype()...') module.footype("X", (object,), {}) - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='cpython fails') def test_app_subclass_of_c_type(self): # on cpython, the size changes (6 bytes added) module = self.import_module(name='foo') From 
pypy.commits at gmail.com Wed Apr 27 09:05:44 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 27 Apr 2016 06:05:44 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: copy machine code byte by byte instead of converting it to HEX Message-ID: <5720b928.2457c20a.4ec44.6b0c@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83980:c6b6bb67de34 Date: 2016-04-27 14:58 +0200 http://bitbucket.org/pypy/pypy/changeset/c6b6bb67de34/ Log: copy machine code byte by byte instead of converting it to HEX diff --git a/rpython/rlib/jitlog.py b/rpython/rlib/jitlog.py --- a/rpython/rlib/jitlog.py +++ b/rpython/rlib/jitlog.py @@ -474,11 +474,21 @@ end_offset = ops_offset[op2] count = end_offset - start_offset - dump = self.mc.copy_core_dump(self.mc.absolute_addr(), start_offset, count) + dump = self.copy_core_dump(self.mc.absolute_addr(), start_offset, count) offset = encode_le_16bit(start_offset) edump = encode_str(dump) self.logger._write_marked(MARK_ASM, offset + edump) + def copy_core_dump(self, addr, offset=0, count=-1): + dump = [] + src = rffi.cast(rffi.CCHARP, addr) + end = self.get_relative_pos() + if count != -1: + end = offset + count + for p in range(offset, end): + dump.append(src[p]) + return ''.join(dump) + def var_to_str(self, arg): try: mv = self.memo[arg] From pypy.commits at gmail.com Wed Apr 27 09:05:46 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 27 Apr 2016 06:05:46 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: fixed tests and added missing method in rvmprof/__init__.py (for disable_jitlog) Message-ID: <5720b92a.0e711c0a.48265.0821@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83981:638ea201b5b8 Date: 2016-04-27 15:04 +0200 http://bitbucket.org/pypy/pypy/changeset/638ea201b5b8/ Log: fixed tests and added missing method in rvmprof/__init__.py (for disable_jitlog) diff --git a/rpython/rlib/rvmprof/__init__.py b/rpython/rlib/rvmprof/__init__.py --- a/rpython/rlib/rvmprof/__init__.py +++ 
b/rpython/rlib/rvmprof/__init__.py @@ -40,3 +40,6 @@ def disable(): _get_vmprof().disable() + +def disable_jitlog(): + _get_vmprof().disable_jitlog() diff --git a/rpython/rlib/test/test_jitlog.py b/rpython/rlib/test/test_jitlog.py --- a/rpython/rlib/test/test_jitlog.py +++ b/rpython/rlib/test/test_jitlog.py @@ -48,8 +48,8 @@ file.ensure() fd = file.open('wb') logger.cintf.jitlog_init(fd.fileno()) - logger.start_new_trace() - log_trace = logger.log_trace(jl.MARK_TRACE, self.make_metainterp_sd(), None) + logger.start_new_trace(self.make_metainterp_sd()) + log_trace = logger.log_trace(jl.MARK_TRACE, None, None) op = ResOperation(rop.DEBUG_MERGE_POINT, [ConstInt(0), ConstInt(0), ConstInt(0)]) log_trace.write([], [op]) #the next line will close 'fd' @@ -60,7 +60,7 @@ jl.encode_str('loop') + jl.encode_le_addr(0) + \ chr(jl.MARK_TRACE) + jl.encode_le_addr(0) + \ chr(jl.MARK_INPUT_ARGS) + jl.encode_str('') + \ - chr(jl.MARK_INIT_MERGE_POINT) + b'\x01s\x00i\x08s\x00i\x10s' + \ + chr(jl.MARK_INIT_MERGE_POINT) + b'\x05\x00\x01s\x00i\x08s\x00i\x10s' + \ chr(jl.MARK_MERGE_POINT) + \ b'\xff' + encode_str('/home/pypy/jit.py') + \ b'\x00' + encode_le_64bit(0) + \ From pypy.commits at gmail.com Wed Apr 27 09:50:59 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Apr 2016 06:50:59 -0700 (PDT) Subject: [pypy-commit] pypy remove-raisingops: test fix Message-ID: <5720c3c3.a82cc20a.faa2b.7449@mx.google.com> Author: Armin Rigo Branch: remove-raisingops Changeset: r83982:9d2ecd1dcf3f Date: 2016-04-27 15:43 +0200 http://bitbucket.org/pypy/pypy/changeset/9d2ecd1dcf3f/ Log: test fix diff --git a/rpython/rtyper/lltypesystem/test/test_lloperation.py b/rpython/rtyper/lltypesystem/test/test_lloperation.py --- a/rpython/rtyper/lltypesystem/test/test_lloperation.py +++ b/rpython/rtyper/lltypesystem/test/test_lloperation.py @@ -53,7 +53,7 @@ from rpython.flowspace.model import Variable, Constant assert llop.bool_not.is_pure([Variable()]) assert llop.debug_assert.is_pure([Variable()]) - 
assert not llop.int_add_ovf.is_pure([Variable(), Variable()]) + assert not llop.setarrayitem.is_pure([Variable(), Variable(), Variable()]) # S1 = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) v_s1 = Variable() From pypy.commits at gmail.com Wed Apr 27 09:51:01 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Apr 2016 06:51:01 -0700 (PDT) Subject: [pypy-commit] pypy remove-raisingops: a remaining direct usage of int_add_ovf Message-ID: <5720c3c5.109a1c0a.e5588.1777@mx.google.com> Author: Armin Rigo Branch: remove-raisingops Changeset: r83983:f61ef2f172af Date: 2016-04-27 15:50 +0200 http://bitbucket.org/pypy/pypy/changeset/f61ef2f172af/ Log: a remaining direct usage of int_add_ovf diff --git a/rpython/rtyper/lltypesystem/rtagged.py b/rpython/rtyper/lltypesystem/rtagged.py --- a/rpython/rtyper/lltypesystem/rtagged.py +++ b/rpython/rtyper/lltypesystem/rtagged.py @@ -3,6 +3,7 @@ InstanceRepr, CLASSTYPE, ll_inst_type, MissingRTypeAttribute, ll_issubclass_const, getclassrepr, getinstancerepr, get_type_repr) from rpython.rtyper.rmodel import TyperError, inputconst +from rpython.rlib.rarithmetic import r_uint, intmask class TaggedInstanceRepr(InstanceRepr): @@ -40,12 +41,8 @@ raise TyperError("must instantiate %r with a simple class call" % ( self.classdef,)) v_value = hop.inputarg(lltype.Signed, arg=1) - c_one = hop.inputconst(lltype.Signed, 1) hop.exception_is_here() - v2 = hop.genop('int_add_ovf', [v_value, v_value], - resulttype = lltype.Signed) - v2p1 = hop.genop('int_add', [v2, c_one], - resulttype = lltype.Signed) + v2p1 = hop.gendirectcall(ll_times_two_plus_one, v_value) v_instance = hop.genop('cast_int_to_ptr', [v2p1], resulttype = self.lowleveltype) return v_instance, False # don't call __init__ @@ -140,6 +137,11 @@ return hop.gendirectcall(ll_unboxed_isinstance_const, v_obj, minid, maxid, c_answer_if_unboxed) +def ll_times_two_plus_one(x): + r = intmask(r_uint(x) << 1) + if r^x < 0: + raise OverflowError("integer addition") + return 
r + 1 def ll_int_to_unboxed(PTRTYPE, value): return lltype.cast_int_to_ptr(PTRTYPE, value*2+1) From pypy.commits at gmail.com Wed Apr 27 10:04:08 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Apr 2016 07:04:08 -0700 (PDT) Subject: [pypy-commit] pypy remove-raisingops: fix Message-ID: <5720c6d8.85661c0a.873b8.2506@mx.google.com> Author: Armin Rigo Branch: remove-raisingops Changeset: r83984:0e92c3689359 Date: 2016-04-27 15:58 +0200 http://bitbucket.org/pypy/pypy/changeset/0e92c3689359/ Log: fix diff --git a/rpython/rlib/rerased.py b/rpython/rlib/rerased.py --- a/rpython/rlib/rerased.py +++ b/rpython/rlib/rerased.py @@ -20,7 +20,7 @@ from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rlib.rarithmetic import is_valid_int +from rpython.rlib.rarithmetic import is_valid_int, r_uint, intmask from rpython.rlib.debug import ll_assert @@ -212,12 +212,12 @@ def _rtype_erase_int(hop): [v_value] = hop.inputargs(lltype.Signed) - c_one = hop.inputconst(lltype.Signed, 1) hop.exception_is_here() - v2 = hop.genop('int_add_ovf', [v_value, v_value], - resulttype = lltype.Signed) - v2p1 = hop.genop('int_add', [v2, c_one], - resulttype = lltype.Signed) - v_instance = hop.genop('cast_int_to_ptr', [v2p1], - resulttype=llmemory.GCREF) + v_instance = hop.gendirectcall(_ll_erase_int, v_value) return v_instance + +def _ll_erase_int(x): + r = intmask(r_uint(x) << 1) + if r^x < 0: + raise OverflowError("integer addition") + return lltype.cast_int_to_ptr(llmemory.GCREF, r + 1) From pypy.commits at gmail.com Wed Apr 27 10:04:10 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Apr 2016 07:04:10 -0700 (PDT) Subject: [pypy-commit] pypy remove-raisingops: fix test Message-ID: <5720c6da.0e711c0a.48265.2145@mx.google.com> Author: Armin Rigo Branch: remove-raisingops Changeset: r83985:72f1e6ff2d9b Date: 2016-04-27 16:03 +0200 
http://bitbucket.org/pypy/pypy/changeset/72f1e6ff2d9b/ Log: fix test diff --git a/rpython/translator/test/test_simplify.py b/rpython/translator/test/test_simplify.py --- a/rpython/translator/test/test_simplify.py +++ b/rpython/translator/test/test_simplify.py @@ -23,9 +23,11 @@ return ovfcheck(x*2) except OverflowError: return -42 - graph, _ = translate(f, [int]) + graph, _ = translate(f, [int], backend_optimize=False) assert len(graph.startblock.operations) == 1 - assert graph.startblock.operations[0].opname == 'int_mul_ovf' + assert graph.startblock.operations[0].opname == 'direct_call' + assert 'll_int_mul_ovf' in repr( + graph.startblock.operations[0].args[0].value) assert len(graph.startblock.exits) == 2 assert [link.target.operations for link in graph.startblock.exits] == \ [(), ()] @@ -36,9 +38,9 @@ from rpython.rlib.rarithmetic import ovfcheck def f(x): return ovfcheck(x*2) - 1 - graph, _ = translate(f, [int]) + graph, _ = translate(f, [int], backend_optimize=False) assert len(graph.startblock.operations) == 2 - assert graph.startblock.operations[0].opname == 'int_mul_ovf' + assert graph.startblock.operations[0].opname == 'direct_call' assert graph.startblock.operations[1].opname == 'int_sub' def test_remove_ovfcheck_floordiv(): @@ -52,9 +54,11 @@ return -42 except ZeroDivisionError: return -43 - graph, _ = translate(f, [int, int]) + graph, _ = translate(f, [int, int], backend_optimize=False) assert len(graph.startblock.operations) == 1 - assert graph.startblock.operations[0].opname == 'int_floordiv_ovf_zer' + assert graph.startblock.operations[0].opname == 'direct_call' + assert 'int_floordiv_ovf_zer' in repr( + graph.startblock.operations[0].args[0].value) assert len(graph.startblock.exits) == 3 assert [link.target.operations for link in graph.startblock.exits[1:]] == \ [(), ()] @@ -68,9 +72,11 @@ return ovfcheck(x // y) except ZeroDivisionError: return -43 - graph, _ = translate(f, [int, int]) + graph, _ = translate(f, [int, int], backend_optimize=False) 
assert len(graph.startblock.operations) == 1 - assert graph.startblock.operations[0].opname == 'int_floordiv_ovf_zer' + assert graph.startblock.operations[0].opname == 'direct_call' + assert 'int_floordiv_ovf_zer' in repr( + graph.startblock.operations[0].args[0].value) assert len(graph.startblock.exits) == 3 assert [link.target.operations for link in graph.startblock.exits[1:]] == \ [(), ()] From pypy.commits at gmail.com Wed Apr 27 11:14:14 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 08:14:14 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: fix for win32 and c90 Message-ID: <5720d746.4412c30a.fec01.ffff9641@mx.google.com> Author: Matti Picus Branch: cpyext-for-merge Changeset: r83986:727b9be53899 Date: 2016-04-27 18:13 +0300 http://bitbucket.org/pypy/pypy/changeset/727b9be53899/ Log: fix for win32 and c90 diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -87,13 +87,13 @@ FILEP = rffi.COpaquePtr('FILE') if sys.platform == 'win32': - fileno = rffi.llexternal('_fileno', [FILEP], rffi.INT) + dash = '_' else: - fileno = rffi.llexternal('fileno', [FILEP], rffi.INT) - + dash = '' +fileno = rffi.llexternal(dash + 'fileno', [FILEP], rffi.INT) fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP) -fdopen = rffi.llexternal('fdopen', [rffi.INT, CONST_STRING], FILEP, - save_err=rffi.RFFI_SAVE_ERRNO) +fdopen = rffi.llexternal(dash + 'fdopen', [rffi.INT, CONST_STRING], + FILEP, save_err=rffi.RFFI_SAVE_ERRNO) _fclose = rffi.llexternal('fclose', [FILEP], rffi.INT) def fclose(fp): diff --git a/pypy/module/cpyext/test/test_listobject.py b/pypy/module/cpyext/test/test_listobject.py --- a/pypy/module/cpyext/test/test_listobject.py +++ b/pypy/module/cpyext/test/test_listobject.py @@ -141,13 +141,14 @@ module = self.import_extension('foo', [ ("test_get_item", "METH_NOARGS", """ - PyObject* o = PyList_New(1); + PyObject* o, *o2, *o3; + o = PyList_New(1); 
- PyObject* o2 = PyInt_FromLong(0); + o2 = PyInt_FromLong(0); PyList_SET_ITEM(o, 0, o2); o2 = NULL; - PyObject* o3 = PyList_GET_ITEM(o, 0); + o3 = PyList_GET_ITEM(o, 0); Py_INCREF(o3); Py_CLEAR(o); return o3; @@ -161,16 +162,17 @@ """ PyObject* o = PyList_New(0); PyObject* o2 = PyList_New(0); + Py_ssize_t refcount, new_refcount; PyList_Append(o, o2); // does not steal o2 - Py_ssize_t refcount = Py_REFCNT(o2); + refcount = Py_REFCNT(o2); // Steal a reference to o2, but leak the old reference to o2. // The net result should be no change in refcount. PyList_SET_ITEM(o, 0, o2); - Py_ssize_t new_refcount = Py_REFCNT(o2); + new_refcount = Py_REFCNT(o2); Py_CLEAR(o); Py_DECREF(o2); // append incref'd. diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -118,12 +118,13 @@ module = self.import_extension('foo', [ ("bounce", "METH_NOARGS", """ + PyThreadState * tstate; if (PyEval_ThreadsInitialized() == 0) { PyEval_InitThreads(); } PyGILState_Ensure(); - PyThreadState *tstate = PyEval_SaveThread(); + tstate = PyEval_SaveThread(); if (tstate == NULL) { return PyLong_FromLong(0); } From pypy.commits at gmail.com Wed Apr 27 11:17:09 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 08:17:09 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-for-merge: close branch to be merged Message-ID: <5720d7f5.43ecc20a.da89f.ffff93e3@mx.google.com> Author: Matti Picus Branch: cpyext-for-merge Changeset: r83987:58de768503e1 Date: 2016-04-27 18:14 +0300 http://bitbucket.org/pypy/pypy/changeset/58de768503e1/ Log: close branch to be merged From pypy.commits at gmail.com Wed Apr 27 11:17:12 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 08:17:12 -0700 (PDT) Subject: [pypy-commit] pypy default: merge a major refactoring of parts of cpyext to default Message-ID: <5720d7f8.821b1c0a.3ab80.361c@mx.google.com> Author: Matti Picus 
Branch: Changeset: r83988:26e1999e0803 Date: 2016-04-27 18:16 +0300 http://bitbucket.org/pypy/pypy/changeset/26e1999e0803/ Log: merge a major refactoring of parts of cpyext to default diff too long, truncating to 2000 out of 8378 lines diff --git a/TODO b/TODO new file mode 100644 --- /dev/null +++ b/TODO @@ -0,0 +1,3 @@ +* python setup.py install in numpy does not somehow tell setuptools + it's installed (I bet it's about the py27 tag) +* reduce size of generated c code from slot definitions in slotdefs. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -22,3 +22,24 @@ JIT: use bitstrings to compress the lists of read or written descrs that we attach to EffectInfo. Fixes a problem we had in remove-objspace-options. + +.. branch: cpyext-for-merge +Update cpyext C-API support: + - allow c-snippet tests to be run with -A so we can verify we are compatible + - fix many edge cases exposed by fixing tests to run with -A + - issequence() logic matches cpython + - make PyStringObject and PyUnicodeObject field names compatible with cpython + - add prelminary support for PyDateTime_* + - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy, + PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile, + - PyAnySet_CheckExact, PyUnicode_Concat + - improve support for PyGILState_Ensure, PyGILState_Release, and thread + primitives, also find a case where CPython will allow thread creation + before PyEval_InitThreads is run, dissallow on PyPy + - create a PyObject-specific list strategy + - rewrite slot assignment for typeobjects + - improve tracking of PyObject to rpython object mapping + - support tp_as_{number, sequence, mapping, buffer} slots +After this branch, we are almost able to support upstream numpy via cpyext, so +we created (yet another) fork of numpy at github.com/pypy/numpy with the needed +changes diff --git a/pypy/interpreter/baseobjspace.py 
b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1176,7 +1176,27 @@ return self.w_False def issequence_w(self, w_obj): - return (self.findattr(w_obj, self.wrap("__getitem__")) is not None) + if self.is_oldstyle_instance(w_obj): + return (self.findattr(w_obj, self.wrap('__getitem__')) is not None) + flag = self.type(w_obj).flag_map_or_seq + if flag == 'M': + return False + elif flag == 'S': + return True + else: + return (self.lookup(w_obj, '__getitem__') is not None) + + def ismapping_w(self, w_obj): + if self.is_oldstyle_instance(w_obj): + return (self.findattr(w_obj, self.wrap('__getitem__')) is not None) + flag = self.type(w_obj).flag_map_or_seq + if flag == 'M': + return True + elif flag == 'S': + return False + else: + return (self.lookup(w_obj, '__getitem__') is not None and + self.lookup(w_obj, '__getslice__') is None) # The code below only works # for the simple case (new-style instance). diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,6 +37,8 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib import rawrefcount +from rpython.rlib import rthread +from rpython.rlib.debug import fatalerror_notb DEBUG_WRAPPER = True @@ -85,11 +87,13 @@ FILEP = rffi.COpaquePtr('FILE') if sys.platform == 'win32': - fileno = rffi.llexternal('_fileno', [FILEP], rffi.INT) + dash = '_' else: - fileno = rffi.llexternal('fileno', [FILEP], rffi.INT) - + dash = '' +fileno = rffi.llexternal(dash + 'fileno', [FILEP], rffi.INT) fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP) +fdopen = rffi.llexternal(dash + 'fdopen', [rffi.INT, CONST_STRING], + FILEP, save_err=rffi.RFFI_SAVE_ERRNO) _fclose = rffi.llexternal('fclose', [FILEP], rffi.INT) def fclose(fp): @@ -119,16 +123,18 @@ def is_valid_fp(fp): return is_valid_fd(fileno(fp)) 
+pypy_decl = 'pypy_decl.h' + constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER -METH_COEXIST METH_STATIC METH_CLASS +METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) -udir.join('pypy_decl.h').write("/* Will be filled later */\n") +udir.join(pypy_decl).write("/* Will be filled later */\n") udir.join('pypy_structmember_decl.h').write("/* Will be filled later */\n") udir.join('pypy_macros.h').write("/* Will be filled later */\n") globals().update(rffi_platform.configure(CConfig_constants)) @@ -144,7 +150,7 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir): +def copy_header_files(dstdir, copy_numpy_headers): # XXX: 20 lines of code to recursively copy a directory, really?? assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') @@ -152,6 +158,18 @@ headers.append(udir.join(name)) _copy_header_files(headers, dstdir) + if copy_numpy_headers: + try: + dstdir.mkdir('numpy') + except py.error.EEXIST: + pass + numpy_dstdir = dstdir / 'numpy' + + numpy_include_dir = include_dir / 'numpy' + numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') + _copy_header_files(numpy_headers, numpy_dstdir) + + class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -177,6 +195,61 @@ # exceptions generate a OperationError(w_SystemError); and the funtion returns # the error value specifed in the API. # +# Handling of the GIL +# ------------------- +# +# We add a global variable 'cpyext_glob_tid' that contains a thread +# id. Invariant: this variable always contain 0 when the PyPy GIL is +# released. 
It should also contain 0 when regular RPython code +# executes. In non-cpyext-related code, it will thus always be 0. +# +# **make_generic_cpy_call():** RPython to C, with the GIL held. Before +# the call, must assert that the global variable is 0 and set the +# current thread identifier into the global variable. After the call, +# assert that the global variable still contains the current thread id, +# and reset it to 0. +# +# **make_wrapper():** C to RPython; by default assume that the GIL is +# held, but accepts gil="acquire", "release", "around", +# "pygilstate_ensure", "pygilstate_release". +# +# When a wrapper() is called: +# +# * "acquire": assert that the GIL is not currently held, i.e. the +# global variable does not contain the current thread id (otherwise, +# deadlock!). Acquire the PyPy GIL. After we acquired it, assert +# that the global variable is 0 (it must be 0 according to the +# invariant that it was 0 immediately before we acquired the GIL, +# because the GIL was released at that point). +# +# * gil=None: we hold the GIL already. Assert that the current thread +# identifier is in the global variable, and replace it with 0. +# +# * "pygilstate_ensure": if the global variable contains the current +# thread id, replace it with 0 and set the extra arg to 0. Otherwise, +# do the "acquire" and set the extra arg to 1. Then we'll call +# pystate.py:PyGILState_Ensure() with this extra arg, which will do +# the rest of the logic. +# +# When a wrapper() returns, first assert that the global variable is +# still 0, and then: +# +# * "release": release the PyPy GIL. The global variable was 0 up to +# and including at the point where we released the GIL, but afterwards +# it is possible that the GIL is acquired by a different thread very +# quickly. +# +# * gil=None: we keep holding the GIL. Set the current thread +# identifier into the global variable. 
+# +# * "pygilstate_release": if the argument is PyGILState_UNLOCKED, +# release the PyPy GIL; otherwise, set the current thread identifier +# into the global variable. The rest of the logic of +# PyGILState_Release() should be done before, in pystate.py. + +cpyext_glob_tid_ptr = lltype.malloc(rffi.CArray(lltype.Signed), 1, + flavor='raw', immortal=True, zero=True) + cpyext_namespace = NameManager('cpyext_') @@ -196,6 +269,9 @@ argnames, varargname, kwargname = pycode.cpython_code_signature(callable.func_code) assert argnames[0] == 'space' + if gil == 'pygilstate_ensure': + assert argnames[-1] == 'previous_state' + del argnames[-1] self.argnames = argnames[1:] assert len(self.argnames) == len(self.argtypes) self.gil = gil @@ -414,15 +490,14 @@ 'PyThread_acquire_lock', 'PyThread_release_lock', 'PyThread_create_key', 'PyThread_delete_key', 'PyThread_set_key_value', 'PyThread_get_key_value', 'PyThread_delete_key_value', - 'PyThread_ReInitTLS', + 'PyThread_ReInitTLS', 'PyThread_init_thread', + 'PyThread_start_new_thread', 'PyStructSequence_InitType', 'PyStructSequence_New', 'PyStructSequence_UnnamedField', 'PyFunction_Type', 'PyMethod_Type', 'PyRange_Type', 'PyTraceBack_Type', - 'PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS', '_PyArray_CopyInto', - 'Py_DebugFlag', 'Py_VerboseFlag', 'Py_InteractiveFlag', 'Py_InspectFlag', 'Py_OptimizeFlag', 'Py_NoSiteFlag', 'Py_BytesWarningFlag', 'Py_UseClassExceptionsFlag', 'Py_FrozenFlag', 'Py_TabcheckFlag', 'Py_UnicodeFlag', 'Py_IgnoreEnvironmentFlag', @@ -431,11 +506,11 @@ ] TYPES = {} GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur - '_Py_NoneStruct#': ('PyObject*', 'space.w_None'), - '_Py_TrueStruct#': ('PyIntObject*', 'space.w_True'), - '_Py_ZeroStruct#': ('PyIntObject*', 'space.w_False'), - '_Py_NotImplementedStruct#': ('PyObject*', 'space.w_NotImplemented'), - '_Py_EllipsisObject#': ('PyObject*', 'space.w_Ellipsis'), + '_Py_NoneStruct#%s' % pypy_decl: ('PyObject*', 'space.w_None'), + 
'_Py_TrueStruct#%s' % pypy_decl: ('PyIntObject*', 'space.w_True'), + '_Py_ZeroStruct#%s' % pypy_decl: ('PyIntObject*', 'space.w_False'), + '_Py_NotImplementedStruct#%s' % pypy_decl: ('PyObject*', 'space.w_NotImplemented'), + '_Py_EllipsisObject#%s' % pypy_decl: ('PyObject*', 'space.w_Ellipsis'), 'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'), } FORWARD_DECLS = [] @@ -461,6 +536,7 @@ "PyUnicode_Type": "space.w_unicode", "PyBaseString_Type": "space.w_basestring", "PyDict_Type": "space.w_dict", + "PyDictProxy_Type": "cpyext.dictobject.make_frozendict(space)", "PyTuple_Type": "space.w_tuple", "PyList_Type": "space.w_list", "PySet_Type": "space.w_set", @@ -484,7 +560,7 @@ 'PyCFunction_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)', 'PyWrapperDescr_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)' }.items(): - GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) + GLOBALS['%s#%s' % (cpyname, pypy_decl)] = ('PyTypeObject*', pypyexpr) for cpyname in '''PyMethodObject PyListObject PyLongObject PyDictObject PyClassObject'''.split(): @@ -602,7 +678,14 @@ fatal_value = callable.api_func.restype._defl() gil_acquire = (gil == "acquire" or gil == "around") gil_release = (gil == "release" or gil == "around") - assert gil is None or gil_acquire or gil_release + pygilstate_ensure = (gil == "pygilstate_ensure") + pygilstate_release = (gil == "pygilstate_release") + assert (gil is None or gil_acquire or gil_release + or pygilstate_ensure or pygilstate_release) + deadlock_error = ("GIL deadlock detected when a CPython C extension " + "module calls %r" % (callable.__name__,)) + no_gil_error = ("GIL not held when a CPython C extension " + "module calls %r" % (callable.__name__,)) @specialize.ll() def wrapper(*args): @@ -610,8 +693,27 @@ from pypy.module.cpyext.pyobject import as_pyobj # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer + + # see "Handling of the GIL" 
above (careful, we don't have the GIL here) + tid = rthread.get_or_make_ident() if gil_acquire: + if cpyext_glob_tid_ptr[0] == tid: + fatalerror_notb(deadlock_error) rgil.acquire() + assert cpyext_glob_tid_ptr[0] == 0 + elif pygilstate_ensure: + from pypy.module.cpyext import pystate + if cpyext_glob_tid_ptr[0] == tid: + cpyext_glob_tid_ptr[0] = 0 + args += (pystate.PyGILState_LOCKED,) + else: + rgil.acquire() + args += (pystate.PyGILState_UNLOCKED,) + else: + if cpyext_glob_tid_ptr[0] != tid: + fatalerror_notb(no_gil_error) + cpyext_glob_tid_ptr[0] = 0 + rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value @@ -620,7 +722,8 @@ try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, - assert len(args) == len(callable.api_func.argtypes) + assert len(args) == (len(callable.api_func.argtypes) + + pygilstate_ensure) for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if is_PyObject(typ) and is_wrapped: @@ -629,6 +732,8 @@ else: arg_conv = arg boxed_args += (arg_conv, ) + if pygilstate_ensure: + boxed_args += (args[-1], ) state = space.fromcache(State) try: result = callable(space, *boxed_args) @@ -688,8 +793,20 @@ pypy_debug_catch_fatal_exception() assert False rffi.stackcounter.stacks_counter -= 1 - if gil_release: + + # see "Handling of the GIL" above + assert cpyext_glob_tid_ptr[0] == 0 + if pygilstate_release: + from pypy.module.cpyext import pystate + arg = rffi.cast(lltype.Signed, args[-1]) + unlock = (arg == pystate.PyGILState_UNLOCKED) + else: + unlock = gil_release + if unlock: rgil.release() + else: + cpyext_glob_tid_ptr[0] = tid + return retval callable._always_inline_ = 'try' wrapper.__name__ = "wrapper for %r" % (callable, ) @@ -782,6 +899,9 @@ structindex = {} for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): + if not func: + # added only for the macro, not the decl + continue restype, args = 
c_function_signature(db, func) members.append('%s (*%s)(%s);' % (restype, name, args)) structindex[name] = len(structindex) @@ -798,7 +918,7 @@ global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): - if "#" in name: + if '#' in name: continue if typ == 'PyDateTime_CAPI*': continue @@ -822,7 +942,7 @@ '\n' + '\n'.join(functions)) - eci = build_eci(True, export_symbols, code) + eci = build_eci(True, export_symbols, code, use_micronumpy) eci = eci.compile_shared_lib( outputfilename=str(udir / "module_cache" / "pypyapi")) modulename = py.path.local(eci.libraries[-1]) @@ -834,7 +954,7 @@ ob = rawrefcount.next_dead(PyObject) if not ob: break - print ob + print 'deallocating PyObject', ob decref(space, ob) print 'dealloc_trigger DONE' return "RETRY" @@ -853,8 +973,8 @@ for name, (typ, expr) in GLOBALS.iteritems(): from pypy.module import cpyext # for the eval() below w_obj = eval(expr) - if name.endswith('#'): - name = name[:-1] + if '#' in name: + name = name.split('#')[0] isptr = False else: isptr = True @@ -899,7 +1019,7 @@ # ctypes.c_void_p) for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): - if name.startswith('cpyext_'): # XXX hack + if name.startswith('cpyext_') or func is None: # XXX hack continue pypyAPI[structindex[name]] = ctypes.cast( ll2ctypes.lltype2ctypes(func.get_llhelper(space)), @@ -952,6 +1072,8 @@ cpyext_type_init = self.cpyext_type_init self.cpyext_type_init = None for pto, w_type in cpyext_type_init: + if space.is_w(w_type, space.w_str): + pto.c_tp_itemsize = 1 finish_type_1(space, pto) finish_type_2(space, pto, w_type) @@ -969,10 +1091,14 @@ pypy_macros = [] renamed_symbols = [] for name in export_symbols: - name = name.replace("#", "") + if '#' in name: + name,header = name.split('#') + else: + header = pypy_decl newname = mangle_name(prefix, name) assert newname, name - pypy_macros.append('#define %s %s' % (name, newname)) + if header == pypy_decl: + 
pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) renamed_symbols.append(newname) @@ -1001,7 +1127,7 @@ # implement function callbacks and generate function decls functions = [] decls = {} - pypy_decls = decls['pypy_decl.h'] = [] + pypy_decls = decls[pypy_decl] = [] pypy_decls.append('#define Signed long /* xxx temporary fix */\n') pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') @@ -1017,6 +1143,8 @@ header = decls[header_name] for name, func in sorted(header_functions.iteritems()): + if not func: + continue if header == DEFAULT_HEADER: _name = name else: @@ -1042,12 +1170,15 @@ functions.append(header + '\n{return va_arg(*vp, %s);}\n' % name) for name, (typ, expr) in GLOBALS.iteritems(): - if name.endswith('#'): - name = name.replace("#", "") + if '#' in name: + name, header = name.split("#") typ = typ.replace("*", "") elif name.startswith('PyExc_'): typ = 'PyObject*' - pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) + header = pypy_decl + if header != pypy_decl: + decls[header].append('#define %s %s' % (name, mangle_name(prefix, name))) + decls[header].append('PyAPI_DATA(%s) %s;' % (typ, name)) for header_name in FUNCTIONS_BY_HEADER.keys(): header = decls[header_name] @@ -1075,9 +1206,10 @@ source_dir / "pysignals.c", source_dir / "pythread.c", source_dir / "missing.c", + source_dir / "pymem.c", ] -def build_eci(building_bridge, export_symbols, code): +def build_eci(building_bridge, export_symbols, code, use_micronumpy=False): "NOT_RPYTHON" # Build code and get pointer to the structure kwds = {} @@ -1099,9 +1231,11 @@ # Generate definitions for global structures structs = ["#include "] + if use_micronumpy: + structs.append('#include /* api.py line 1223 */') for name, (typ, expr) in GLOBALS.iteritems(): - if name.endswith('#'): - structs.append('%s %s;' % (typ[:-1], name[:-1])) + if '#' in name: + structs.append('%s %s;' % (typ[:-1], 
name.split('#')[0])) elif name.startswith('PyExc_'): structs.append('PyTypeObject _%s;' % (name,)) structs.append('PyObject* %s = (PyObject*)&_%s;' % (name, name)) @@ -1142,11 +1276,12 @@ use_micronumpy = space.config.objspace.usemodules.micronumpy if not use_micronumpy: return use_micronumpy - # import to register api functions by side-effect - import pypy.module.cpyext.ndarrayobject - global GLOBALS, SYMBOLS_C, separate_module_files - GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") - SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS'] + # import registers api functions by side-effect, we also need HEADER + from pypy.module.cpyext.ndarrayobject import HEADER + global GLOBALS, FUNCTIONS_BY_HEADER, separate_module_files + for func_name in ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS']: + FUNCTIONS_BY_HEADER.setdefault(HEADER, {})[func_name] = None + GLOBALS["PyArray_Type#%s" % HEADER] = ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") separate_module_files.append(source_dir / "ndarrayobject.c") return use_micronumpy @@ -1156,14 +1291,18 @@ export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() + prefix = 'PyPy' - generate_macros(export_symbols, prefix='PyPy') + generate_macros(export_symbols, prefix=prefix) functions = generate_decls_and_callbacks(db, [], api_struct=False, - prefix='PyPy') - code = "#include \n" + "\n".join(functions) + prefix=prefix) + code = "#include \n" + if use_micronumpy: + code += "#include /* api.py line 1290 */" + code += "\n".join(functions) - eci = build_eci(False, export_symbols, code) + eci = build_eci(False, export_symbols, code, use_micronumpy) space.fromcache(State).install_dll(eci) @@ -1175,9 +1314,14 @@ lines = ['PyObject *pypy_static_pyobjs[] = {\n'] include_lines = ['RPY_EXTERN PyObject *pypy_static_pyobjs[];\n'] for name, (typ, expr) in 
sorted(GLOBALS.items()): - if name.endswith('#'): + if '#' in name: + name, header = name.split('#') assert typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*') - typ, name = typ[:-1], name[:-1] + typ = typ[:-1] + if header != pypy_decl: + # since the #define is not in pypy_macros, do it here + mname = mangle_name(prefix, name) + include_lines.append('#define %s %s\n' % (name, mname)) elif name.startswith('PyExc_'): typ = 'PyTypeObject' name = '_' + name @@ -1204,6 +1348,8 @@ for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): + if not func: + continue newname = mangle_name('PyPy', name) or name deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) @@ -1211,7 +1357,7 @@ setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include) + copy_header_files(trunk_include, use_micronumpy) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) @@ -1348,10 +1494,17 @@ arg = as_pyobj(space, arg) boxed_args += (arg,) + # see "Handling of the GIL" above + tid = rthread.get_ident() + assert cpyext_glob_tid_ptr[0] == 0 + cpyext_glob_tid_ptr[0] = tid + try: # Call the function result = call_external_function(func, *boxed_args) finally: + assert cpyext_glob_tid_ptr[0] == tid + cpyext_glob_tid_ptr[0] = 0 keepalive_until_here(*keepalives) if is_PyObject(RESULT_TYPE): diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -2,11 +2,11 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, - PyObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) + PyObjectFields, PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) from pypy.module.cpyext.pyerrors import PyErr_BadArgument from 
pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, - make_typedescr, get_typedescr) + make_typedescr, get_typedescr, as_pyobj, Py_IncRef) ## ## Implementation of PyStringObject @@ -27,7 +27,7 @@ ## Solution ## -------- ## -## PyStringObject contains two additional members: the size and a pointer to a +## PyStringObject contains two additional members: the ob_size and a pointer to a ## char buffer; it may be NULL. ## ## - A string allocated by pypy will be converted into a PyStringObject with a @@ -36,7 +36,7 @@ ## ## - A string allocated with PyString_FromStringAndSize(NULL, size) will ## allocate a PyStringObject structure, and a buffer with the specified -## size, but the reference won't be stored in the global map; there is no +## size+1, but the reference won't be stored in the global map; there is no ## corresponding object in pypy. When from_ref() or Py_INCREF() is called, ## the pypy string is created, and added to the global map of tracked ## objects. The buffer is then supposed to be immutable. @@ -52,8 +52,8 @@ PyStringObjectStruct = lltype.ForwardReference() PyStringObject = lltype.Ptr(PyStringObjectStruct) -PyStringObjectFields = PyObjectFields + \ - (("buffer", rffi.CCHARP), ("size", Py_ssize_t)) +PyStringObjectFields = PyVarObjectFields + \ + (("ob_shash", rffi.LONG), ("ob_sstate", rffi.INT), ("buffer", rffi.CCHARP)) cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct) @bootstrap_function @@ -78,10 +78,11 @@ py_str = rffi.cast(PyStringObject, py_obj) buflen = length + 1 - py_str.c_size = length + py_str.c_ob_size = length py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw', zero=True, add_memory_pressure=True) + py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED return py_str def string_attach(space, py_obj, w_obj): @@ -90,8 +91,10 @@ buffer must not be modified. 
""" py_str = rffi.cast(PyStringObject, py_obj) - py_str.c_size = len(space.str_w(w_obj)) + py_str.c_ob_size = len(space.str_w(w_obj)) py_str.c_buffer = lltype.nullptr(rffi.CCHARP.TO) + py_str.c_ob_shash = space.hash_w(w_obj) + py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL def string_realize(space, py_obj): """ @@ -99,8 +102,13 @@ be modified after this call. """ py_str = rffi.cast(PyStringObject, py_obj) - s = rffi.charpsize2str(py_str.c_buffer, py_str.c_size) + if not py_str.c_buffer: + py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, py_str.c_ob_size + 1, + flavor='raw', zero=True) + s = rffi.charpsize2str(py_str.c_buffer, py_str.c_ob_size) w_obj = space.wrap(s) + py_str.c_ob_shash = space.hash_w(w_obj) + py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL track_reference(space, py_obj, w_obj) return w_obj @@ -169,12 +177,12 @@ ref_str.c_buffer = rffi.str2charp(s) buffer[0] = ref_str.c_buffer if length: - length[0] = ref_str.c_size + length[0] = ref_str.c_ob_size else: i = 0 while ref_str.c_buffer[i] != '\0': i += 1 - if i != ref_str.c_size: + if i != ref_str.c_ob_size: raise OperationError(space.w_TypeError, space.wrap( "expected string without null bytes")) return 0 @@ -183,7 +191,7 @@ def PyString_Size(space, ref): if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: ref = rffi.cast(PyStringObject, ref) - return ref.c_size + return ref.c_ob_size else: w_obj = from_ref(space, ref) return space.len_w(w_obj) @@ -212,7 +220,7 @@ ref[0] = lltype.nullptr(PyObject.TO) raise to_cp = newsize - oldsize = py_str.c_size + oldsize = py_str.c_ob_size if oldsize < newsize: to_cp = oldsize for i in range(to_cp): @@ -236,15 +244,16 @@ if not ref[0]: return - if w_newpart is None or not PyString_Check(space, ref[0]) or \ - not PyString_Check(space, w_newpart): + if w_newpart is None or not PyString_Check(space, ref[0]) or not \ + (space.isinstance_w(w_newpart, space.w_str) or + space.isinstance_w(w_newpart, 
space.w_unicode)): Py_DecRef(space, ref[0]) ref[0] = lltype.nullptr(PyObject.TO) return w_str = from_ref(space, ref[0]) w_newstr = space.add(w_str, w_newpart) - Py_DecRef(space, ref[0]) ref[0] = make_ref(space, w_newstr) + Py_IncRef(space, ref[0]) @cpython_api([PyObjectP, PyObject], lltype.Void) def PyString_ConcatAndDel(space, ref, newpart): diff --git a/pypy/module/cpyext/cdatetime.py b/pypy/module/cpyext/cdatetime.py --- a/pypy/module/cpyext/cdatetime.py +++ b/pypy/module/cpyext/cdatetime.py @@ -42,9 +42,15 @@ return datetimeAPI -PyDateTime_Date = PyObject -PyDateTime_Time = PyObject -PyDateTime_DateTime = PyObject +PyDateTime_DateStruct = lltype.ForwardReference() +PyDateTime_TimeStruct = lltype.ForwardReference() +PyDateTime_DateTimeStruct = lltype.ForwardReference() +cpython_struct("PyDateTime_Date", PyObjectFields, PyDateTime_DateStruct) +PyDateTime_Date = lltype.Ptr(PyDateTime_DateStruct) +cpython_struct("PyDateTime_Time", PyObjectFields, PyDateTime_TimeStruct) +PyDateTime_Time = lltype.Ptr(PyDateTime_TimeStruct) +cpython_struct("PyDateTime_DateTime", PyObjectFields, PyDateTime_DateTimeStruct) +PyDateTime_DateTime = lltype.Ptr(PyDateTime_DateTimeStruct) PyDeltaObjectStruct = lltype.ForwardReference() cpython_struct("PyDateTime_Delta", PyObjectFields, PyDeltaObjectStruct) diff --git a/pypy/module/cpyext/complexobject.py b/pypy/module/cpyext/complexobject.py --- a/pypy/module/cpyext/complexobject.py +++ b/pypy/module/cpyext/complexobject.py @@ -1,16 +1,51 @@ from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.cpyext.api import ( +from pypy.module.cpyext.api import (PyObjectFields, bootstrap_function, cpython_api, cpython_struct, PyObject, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + make_typedescr, track_reference, from_ref) from pypy.module.cpyext.floatobject import PyFloat_AsDouble from pypy.objspace.std.complexobject import W_ComplexObject from pypy.interpreter.error import OperationError PyComplex_Check, 
PyComplex_CheckExact = build_type_checkers("Complex") -Py_complex_t = lltype.ForwardReference() +Py_complex_t = rffi.CStruct('Py_complex_t', + ('real', rffi.DOUBLE), + ('imag', rffi.DOUBLE), + hints={'size': 2 * rffi.sizeof(rffi.DOUBLE)}) Py_complex_ptr = lltype.Ptr(Py_complex_t) -Py_complex_fields = (("real", rffi.DOUBLE), ("imag", rffi.DOUBLE)) -cpython_struct("Py_complex", Py_complex_fields, Py_complex_t) + +PyComplexObjectStruct = lltype.ForwardReference() +PyComplexObject = lltype.Ptr(PyComplexObjectStruct) +PyComplexObjectFields = PyObjectFields + \ + (("cval", Py_complex_t),) +cpython_struct("PyComplexObject", PyComplexObjectFields, PyComplexObjectStruct) + + at bootstrap_function +def init_complexobject(space): + "Type description of PyComplexObject" + make_typedescr(space.w_complex.layout.typedef, + basestruct=PyComplexObject.TO, + attach=complex_attach, + realize=complex_realize) + +def complex_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyComplexObject with the given complex object. The + value must not be modified. + """ + assert isinstance(w_obj, W_ComplexObject) + py_obj = rffi.cast(PyComplexObject, py_obj) + py_obj.c_cval.c_real = w_obj.realval + py_obj.c_cval.c_imag = w_obj.imagval + +def complex_realize(space, obj): + py_obj = rffi.cast(PyComplexObject, obj) + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(W_ComplexObject, w_type) + w_obj.__init__(py_obj.c_cval.c_real, py_obj.c_cval.c_imag) + track_reference(space, obj, w_obj) + return w_obj @cpython_api([lltype.Float, lltype.Float], PyObject) diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -23,6 +23,7 @@ # NOTE: this works so far because all our dict strategies store # *values* as full objects, which stay alive as long as the dict is # alive and not modified. So we can return a borrowed ref. 
+ # XXX this is wrong with IntMutableCell. Hope it works... return w_res @cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1) @@ -62,6 +63,7 @@ # NOTE: this works so far because all our dict strategies store # *values* as full objects, which stay alive as long as the dict is # alive and not modified. So we can return a borrowed ref. + # XXX this is wrong with IntMutableCell. Hope it works... return w_res @cpython_api([PyObject, CONST_STRING], rffi.INT_real, error=-1) @@ -104,6 +106,32 @@ """ return space.call_method(space.w_dict, "copy", w_obj) +def _has_val(space, w_dict, w_key): + try: + w_val = space.getitem(w_dict, w_key) + except OperationError as e: + if e.match(space, space.w_KeyError): + return False + else: + raise + return True + + at cpython_api([PyObject, PyObject, rffi.INT_real], rffi.INT_real, error=-1) +def PyDict_Merge(space, w_a, w_b, override): + """Iterate over mapping object b adding key-value pairs to dictionary a. + b may be a dictionary, or any object supporting PyMapping_Keys() + and PyObject_GetItem(). If override is true, existing pairs in a + will be replaced if a matching key is found in b, otherwise pairs will + only be added if there is not a matching key in a. Return 0 on + success or -1 if an exception was raised. 
+ """ + override = rffi.cast(lltype.Signed, override) + w_keys = space.call_method(w_b, "keys") + for w_key in space.iteriterable(w_keys): + if not _has_val(space, w_a, w_key) or override != 0: + space.setitem(w_a, w_key, space.getitem(w_b, w_key)) + return 0 + @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) def PyDict_Update(space, w_obj, w_other): """This is the same as PyDict_Merge(a, b, 1) in C, or a.update(b) in @@ -204,6 +232,12 @@ @specialize.memo() def make_frozendict(space): + if space not in _frozendict_cache: + _frozendict_cache[space] = _make_frozendict(space) + return _frozendict_cache[space] + +_frozendict_cache = {} +def _make_frozendict(space): return space.appexec([], '''(): import _abcoll class FrozenDict(_abcoll.Mapping): @@ -222,3 +256,15 @@ w_frozendict = make_frozendict(space) return space.call_function(w_frozendict, w_dict) + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyDictProxy_Check(space, w_obj): + w_typ = make_frozendict(space) + #print 'check', w_typ, space.type(w_obj) + return space.isinstance_w(w_obj, w_typ) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyDictProxy_CheckExact(space, w_obj): + w_typ = make_frozendict(space) + #print 'exact', w_typ, w_obj + return space.is_w(space.type(w_obj), w_typ) + diff --git a/pypy/module/cpyext/floatobject.py b/pypy/module/cpyext/floatobject.py --- a/pypy/module/cpyext/floatobject.py +++ b/pypy/module/cpyext/floatobject.py @@ -1,8 +1,42 @@ from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import ( +from pypy.module.cpyext.api import (PyObjectFields, bootstrap_function, + cpython_struct, CANNOT_FAIL, cpython_api, PyObject, build_type_checkers, CONST_STRING) +from pypy.module.cpyext.pyobject import ( + make_typedescr, track_reference, from_ref) from pypy.interpreter.error import OperationError from rpython.rlib.rstruct import runpack +from pypy.objspace.std.floatobject import W_FloatObject + 
+PyFloatObjectStruct = lltype.ForwardReference() +PyFloatObject = lltype.Ptr(PyFloatObjectStruct) +PyFloatObjectFields = PyObjectFields + \ + (("ob_fval", rffi.DOUBLE),) +cpython_struct("PyFloatObject", PyFloatObjectFields, PyFloatObjectStruct) + + at bootstrap_function +def init_floatobject(space): + "Type description of PyFloatObject" + make_typedescr(space.w_float.layout.typedef, + basestruct=PyFloatObject.TO, + attach=float_attach, + realize=float_realize) + +def float_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyFloatObject with the given float object. The + value must not be modified. + """ + py_float = rffi.cast(PyFloatObject, py_obj) + py_float.c_ob_fval = space.float_w(w_obj) + +def float_realize(space, obj): + floatval = rffi.cast(lltype.Float, rffi.cast(PyFloatObject, obj).c_ob_fval) + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(W_FloatObject, w_type) + w_obj.__init__(floatval) + track_reference(space, obj, w_obj) + return w_obj PyFloat_Check, PyFloat_CheckExact = build_type_checkers("Float") diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -112,9 +112,11 @@ #include "dictobject.h" #include "intobject.h" #include "listobject.h" +#include "longobject.h" #include "unicodeobject.h" #include "compile.h" #include "frameobject.h" +#include "memoryobject.h" #include "eval.h" #include "pymem.h" #include "pycobject.h" diff --git a/pypy/module/cpyext/include/complexobject.h b/pypy/module/cpyext/include/complexobject.h --- a/pypy/module/cpyext/include/complexobject.h +++ b/pypy/module/cpyext/include/complexobject.h @@ -6,14 +6,16 @@ extern "C" { #endif -/* fake PyComplexObject so that code that doesn't do direct field access works */ -#define PyComplexObject PyObject - typedef struct Py_complex_t { double real; double imag; } Py_complex; +typedef struct { + PyObject_HEAD + 
Py_complex cval; +} PyComplexObject; + /* generated function */ PyAPI_FUNC(int) _PyComplex_AsCComplex(PyObject *, Py_complex *); PyAPI_FUNC(PyObject *) _PyComplex_FromCComplex(Py_complex *); diff --git a/pypy/module/cpyext/include/datetime.h b/pypy/module/cpyext/include/datetime.h --- a/pypy/module/cpyext/include/datetime.h +++ b/pypy/module/cpyext/include/datetime.h @@ -24,6 +24,18 @@ PyObject_HEAD } PyDateTime_Delta; +typedef struct { + PyObject_HEAD +} PyDateTime_Date; + +typedef struct { + PyObject_HEAD +} PyDateTime_Time; + +typedef struct { + PyObject_HEAD +} PyDateTime_DateTime; + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/include/descrobject.h b/pypy/module/cpyext/include/descrobject.h --- a/pypy/module/cpyext/include/descrobject.h +++ b/pypy/module/cpyext/include/descrobject.h @@ -12,4 +12,34 @@ } PyGetSetDef; +#define PyDescr_COMMON \ + PyObject_HEAD \ + PyTypeObject *d_type; \ + PyObject *d_name + +typedef struct { + PyDescr_COMMON; +} PyDescrObject; + +typedef struct { + PyDescr_COMMON; + PyMethodDef *d_method; +} PyMethodDescrObject; + +typedef struct { + PyDescr_COMMON; + struct PyMemberDef *d_member; +} PyMemberDescrObject; + +typedef struct { + PyDescr_COMMON; + PyGetSetDef *d_getset; +} PyGetSetDescrObject; + +typedef struct { + PyDescr_COMMON; + struct wrapperbase *d_base; + void *d_wrapped; /* This can be any function pointer */ +} PyWrapperDescrObject; + #endif diff --git a/pypy/module/cpyext/include/floatobject.h b/pypy/module/cpyext/include/floatobject.h --- a/pypy/module/cpyext/include/floatobject.h +++ b/pypy/module/cpyext/include/floatobject.h @@ -3,10 +3,22 @@ #ifndef Py_FLOATOBJECT_H #define Py_FLOATOBJECT_H + +#ifdef _MSC_VER +#include +#include +#define copysign _copysign +#endif + #ifdef __cplusplus extern "C" { #endif +typedef struct { + PyObject_HEAD + double ob_fval; +} PyFloatObject; + #define PyFloat_STR_PRECISION 12 #ifdef Py_NAN diff --git a/pypy/module/cpyext/include/longobject.h 
b/pypy/module/cpyext/include/longobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/longobject.h @@ -0,0 +1,21 @@ +#ifndef Py_LONGOBJECT_H +#define Py_LONGOBJECT_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* why does cpython redefine these, and even supply an implementation in mystrtoul.c? +PyAPI_FUNC(unsigned long) PyOS_strtoul(const char *, char **, int); +PyAPI_FUNC(long) PyOS_strtol(const char *, char **, int); +*/ + +#define PyOS_strtoul strtoul +#define PyOS_strtol strtoul + +#ifdef __cplusplus +} +#endif +#endif /* !Py_LONGOBJECT_H */ diff --git a/pypy/module/cpyext/include/memoryobject.h b/pypy/module/cpyext/include/memoryobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/memoryobject.h @@ -0,0 +1,14 @@ +#ifndef Py_MEMORYOBJECT_H +#define Py_MEMORYOBJECT_H + +#ifdef __cplusplus +extern "C" { +#endif + + + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_MEMORYOBJECT_H */ diff --git a/pypy/module/cpyext/include/numpy/README b/pypy/module/cpyext/include/numpy/README new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/README @@ -0,0 +1,8 @@ +headers for the micronumpy multiarray and umath modules, +as used by https://bitbucket.org/pypy/numpy. They are needed by +downstream packages that depend on numpy, like matplotlib, but can +be slightly non-compatible with traditional numpy C-API use cases. + +The trick to including these headers is in get_include, located in +numpy/lib/utils.py. 
They will be ignored by an upstream build of numpy +since the /numpy/core/include path will be used instead diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h @@ -0,0 +1,11 @@ + + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +static int import_array(){return 0;}; +static int _import_array(){return 0;}; +static int _import_math(){return 0;}; + diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -1,6 +1,8 @@ -/* NDArray object interface - S. H. Muller, 2013/07/26 */ -/* For testing ndarrayobject only */ +/* NDArray object interface - S. H. Muller, 2013/07/26 + * It will be copied by numpy/core/setup.py by install_data to + * site-packages/numpy/core/includes/numpy +*/ #ifndef Py_NDARRAYOBJECT_H #define Py_NDARRAYOBJECT_H @@ -8,8 +10,14 @@ extern "C" { #endif +#include "pypy_numpy.h" +#include "old_defines.h" #include "npy_common.h" -#include "ndarraytypes.h" +#include "__multiarray_api.h" + +#define NPY_UNUSED(x) x +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -17,20 +25,206 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto -#define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define NPY_MAXDIMS 32 -/* functions defined in ndarrayobject.c*/ +#ifndef NDARRAYTYPES_H +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +/* data types copied from numpy/ndarraytypes.h + * keep numbers in sync with micronumpy.interp_dtype.DTypeCache + */ 
+enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. + */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) +#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) +#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) +#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) + + +/* flags */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 +#define NPY_ARRAY_OWNDATA 0x0004 +#define NPY_ARRAY_FORCECAST 0x0010 +#define NPY_ARRAY_ENSURECOPY 0x0020 +#define NPY_ARRAY_ENSUREARRAY 0x0040 +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 +#define NPY_ARRAY_ALIGNED 0x0100 +#define NPY_ARRAY_NOTSWAPPED 0x0200 +#define NPY_ARRAY_WRITEABLE 0x0400 +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO 
(NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +#define NPY_FARRAY NPY_ARRAY_FARRAY +#define NPY_CARRAY NPY_ARRAY_CARRAY + +#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + +#define PyArray_ISONESEGMENT(arr) (1) +#define PyArray_ISNOTSWAPPED(arr) (1) +#define PyArray_ISBYTESWAPPED(arr) (0) + +#endif + +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 
NPY_USHORT +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX32 NPY_CFLOAT +#define NPY_COMPLEX64 NPY_CDOUBLE + + +/* functions */ +#ifndef PyArray_NDIM + +#define PyArray_Check _PyArray_Check +#define PyArray_CheckExact _PyArray_CheckExact +#define PyArray_FLAGS _PyArray_FLAGS + +#define PyArray_NDIM _PyArray_NDIM +#define PyArray_DIM _PyArray_DIM +#define PyArray_STRIDE _PyArray_STRIDE +#define PyArray_SIZE _PyArray_SIZE +#define PyArray_ITEMSIZE _PyArray_ITEMSIZE +#define PyArray_NBYTES _PyArray_NBYTES +#define PyArray_TYPE _PyArray_TYPE +#define PyArray_DATA _PyArray_DATA + +#define PyArray_Size PyArray_SIZE +#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) + +#define PyArray_FromAny _PyArray_FromAny +#define PyArray_FromObject _PyArray_FromObject +#define PyArray_ContiguousFromObject PyArray_FromObject +#define PyArray_ContiguousFromAny PyArray_FromObject + +#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) +#define PyArray_FROM_OTF(obj, typenum, requirements) \ + PyArray_FromObject(obj, typenum, 0, 0) + +#define PyArray_New _PyArray_New +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData +#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning + +#define PyArray_EMPTY(nd, dims, type_num, fortran) \ + PyArray_SimpleNew(nd, dims, type_num) PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); -PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) + +/* Don't use these in loops! 
*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0))) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1))) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1) + \ + (k)*PyArray_STRIDE(obj,2))) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1) + \ + (k)*PyArray_STRIDE(obj,2) + \ + (l)*PyArray_STRIDE(obj,3))) + +#endif #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h --- a/pypy/module/cpyext/include/numpy/ndarraytypes.h +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -1,9 +1,69 @@ #ifndef NDARRAYTYPES_H #define NDARRAYTYPES_H -/* For testing ndarrayobject only */ +#include "numpy/npy_common.h" +//#include "npy_endian.h" +//#include "npy_cpu.h" +//#include "utils.h" -#include "numpy/npy_common.h" +//for pypy - numpy has lots of typedefs +//for pypy - make life easier, less backward support +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION +#undef NPY_1_8_API_VERSION + +#define NPY_ENABLE_SEPARATE_COMPILATION 1 +#define NPY_VISIBILITY_HIDDEN + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN +#else + #define NPY_NO_EXPORT static +#endif + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + + + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. 
+ * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version. This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. 
+ */ +#define NPY_FEATURE_VERSION NPY_API_VERSION enum NPY_TYPES { NPY_BOOL=0, NPY_BYTE, NPY_UBYTE, @@ -31,6 +91,18 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + /* * These characters correspond to the array type and the struct * module @@ -85,6 +157,27 @@ }; typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2 +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_MERGESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0, +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { NPY_NOSCALAR=-1, NPY_BOOL_SCALAR, NPY_INTPOS_SCALAR, @@ -93,6 +186,7 @@ NPY_COMPLEX_SCALAR, NPY_OBJECT_SCALAR } NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) /* For specifying array memory layout or iteration order */ typedef enum { @@ -106,6 +200,729 @@ NPY_KEEPORDER=2 } NPY_ORDER; +/* For specifying allowed casting in operations which support it */ +typedef enum { + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, + + /* + * Temporary internal definition only, will be removed in upcoming + * release, see below + * */ + NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length 
of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) + +typedef enum { + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10,/* nanoseconds */ + NPY_FR_ps = 11,/* picoseconds */ + NPY_FR_fs = 12,/* femtoseconds */ + NPY_FR_as = 13,/* attoseconds */ + NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. */ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. 
*/ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. + ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + + /* + * Macros to define how array, and dimension/strides data is + * allocated. 
+ */ + + /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ + +#define NPY_USE_PYMEM 1 + +#if NPY_USE_PYMEM == 1 +#define PyArray_malloc PyMem_Malloc +#define PyArray_free PyMem_Free +#define PyArray_realloc PyMem_Realloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. 
Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. 
*/ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). 
+ */ + char byteorder; + /* flags describing data type */ + char flags; + /* number representing this type */ + int type_num; + /* element size (itemsize) for this type */ + int elsize; + /* alignment needed for this type */ + int alignment; + /* + * Non-NULL if this type is + * is an array (C-contiguous) + * of some other type + */ + struct _arr_descr *subarray; + /* + * The fields dictionary for this type + * For statically defined descr this + * is always Py_None + */ + PyObject *fields; + /* + * An ordered tuple of field names or NULL + * if no fields are defined + */ + PyObject *names; + /* + * a table of functions specific for each + * basic data descriptor + */ + PyArray_ArrFuncs *f; + /* Metadata about this dtype */ + PyObject *metadata; + /* + * Metadata specific to the C implementation + * of the particular dtype. This was added + * for NumPy 1.7.0. + */ + NpyAuxData *c_metadata; +} PyArray_Descr; + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. 
Except in the + * case of UPDATEIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that shold be + * decref'd on deletion + * + * For UPDATEIFCOPY flag this is an + * array to-be-updated upon deletion + * of this one From pypy.commits at gmail.com Wed Apr 27 11:22:08 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 27 Apr 2016 08:22:08 -0700 (PDT) Subject: [pypy-commit] pypy share-mapdict-methods: don't put anything into __init__.py Message-ID: <5720d920.0f801c0a.51e59.0db1@mx.google.com> Author: Carl Friedrich Bolz Branch: share-mapdict-methods Changeset: r83989:c6292a877bc4 Date: 2016-04-27 18:09 +0300 http://bitbucket.org/pypy/pypy/changeset/c6292a877bc4/ Log: don't put anything into __init__.py diff --git a/pypy/interpreter/test/test_extmodules.py b/pypy/interpreter/test/test_extmodules.py --- a/pypy/interpreter/test/test_extmodules.py +++ b/pypy/interpreter/test/test_extmodules.py @@ -2,7 +2,7 @@ import pytest from pypy.config.pypyoption import get_pypy_config -from pypy.objspace.std import StdObjSpace +from pypy.objspace.std.objspace import StdObjSpace from rpython.tool.udir import udir mod_init = """ diff --git a/pypy/objspace/std/__init__.py b/pypy/objspace/std/__init__.py --- a/pypy/objspace/std/__init__.py +++ b/pypy/objspace/std/__init__.py @@ -1,2 +0,0 @@ -from pypy.objspace.std.objspace import StdObjSpace -Space = StdObjSpace diff --git a/pypy/tool/option.py b/pypy/tool/option.py --- a/pypy/tool/option.py +++ b/pypy/tool/option.py @@ -29,7 +29,5 @@ return config def make_objspace(config): - mod = __import__('pypy.objspace.std', - None, None, ['Space']) - Space = mod.Space + from pypy.objspace.std.objspace import StdObjSpace as Space return Space(config) From pypy.commits at gmail.com Wed Apr 27 11:22:10 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 27 Apr 2016 08:22:10 -0700 (PDT) 
Subject: [pypy-commit] pypy share-mapdict-methods: - try to reduce duplication of methods by putting a lot of the methods that Message-ID: <5720d922.6614c20a.243bf.ffff9c48@mx.google.com> Author: Carl Friedrich Bolz Branch: share-mapdict-methods Changeset: r83990:a1a3b6a39592 Date: 2016-04-27 18:13 +0300 http://bitbucket.org/pypy/pypy/changeset/a1a3b6a39592/ Log: - try to reduce duplication of methods by putting a lot of the methods that mapdicts defines into W_Root. That way, the methods aren't duplicated into every user-defined subclass. - only inline five fields into the user-defined subclasses of object, and all new-style classes diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -5,7 +5,7 @@ from rpython.rlib import jit, types from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, - compute_unique_id, specialize) + compute_unique_id, specialize, import_from_mixin) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX @@ -16,6 +16,8 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary +from pypy.objspace.std.basemapdictobject import RootObjectMapdictMixin + __all__ = ['ObjSpace', 'OperationError', 'W_Root'] @@ -30,8 +32,9 @@ __slots__ = ('__weakref__',) user_overridden_class = False - def getdict(self, space): - return None + # a lot of the default functionality assumes mapdict now. 
+ # import those methods + import_from_mixin(RootObjectMapdictMixin) def getdictvalue(self, space, attr): w_dict = self.getdict(space) @@ -46,29 +49,18 @@ return True return False - def deldictvalue(self, space, attr): - w_dict = self.getdict(space) - if w_dict is not None: - try: - space.delitem(w_dict, space.wrap(attr)) - return True - except OperationError, ex: - if not ex.match(space, space.w_KeyError): - raise - return False + # deldictvalue, getdict, setdict are mixed in from basemapdictobject + # def deldictvalue(self, space, attrname): + # def getdict(self, space): + # def setdict(self, space, w_dict): - def setdict(self, space, w_dict): - raise oefmt(space.w_TypeError, - "attribute '__dict__' of %T objects is not writable", - self) # to be used directly only by space.type implementations def getclass(self, space): return space.gettypeobject(self.typedef) - def setclass(self, space, w_subtype): - raise OperationError(space.w_TypeError, - space.wrap("__class__ assignment: only for heap types")) + # setclass is mixed in from basemapdictobject + # def setclass(self, space, w_cls): def user_setup(self, space, w_subtype): raise NotImplementedError("only for interp-level user subclasses " @@ -106,14 +98,10 @@ return space.wrap("<%s at 0x%s%s>" % (info, addrstring, moreinfo)) - def getslotvalue(self, index): - raise NotImplementedError - - def setslotvalue(self, index, w_val): - raise NotImplementedError - - def delslotvalue(self, index): - raise NotImplementedError + # mixed in from basemapdictobject are: getslotvalue, setslotvalue, delslotvalue + # def getslotvalue(self, index): + # def setslotvalue(self, index, w_val): + # def delslotvalue(self, slotindex): def descr_call_mismatch(self, space, opname, RequiredClass, args): if RequiredClass is None: @@ -125,15 +113,10 @@ # used by _weakref implemenation - def getweakref(self): - return None - - def setweakref(self, space, weakreflifeline): - raise oefmt(space.w_TypeError, - "cannot create weak reference to '%T' 
object", self) - - def delweakref(self): - pass + # mixed in from basemapdictobject are: getweakref, setweakref, delweakref + # def getweakref(self): + # def setweakref(self, space, weakreflifeline): + # def delweakref(self): def clear_all_weakrefs(self): """Call this at the beginning of interp-level __del__() methods @@ -171,19 +154,6 @@ self.__already_enqueued_for_destruction += (callback,) space.user_del_action.register_callback(self, callback, descrname) - # hooks that the mapdict implementations needs: - def _get_mapdict_map(self): - return None - def _set_mapdict_map(self, map): - raise NotImplementedError - def _mapdict_read_storage(self, index): - raise NotImplementedError - def _mapdict_write_storage(self, index, value): - raise NotImplementedError - def _mapdict_storage_length(self): - raise NotImplementedError - def _set_mapdict_storage_and_map(self, storage, map): - raise NotImplementedError # ------------------------------------------------------------------- diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -362,6 +362,26 @@ """) assert seen == [1] + def test_mapdict_number_of_slots(self): + space = self.space + a, b, c = space.unpackiterable(space.appexec([], """(): + class A(object): + pass + a = A() + a.x = 1 + class B: + pass + b = B() + b.x = 1 + class C(int): + pass + c = C(1) + c.x = 1 + return a, b, c + """), 3) + assert not hasattr(a, "storage") + assert not hasattr(b, "storage") + assert hasattr(c, "storage") class AppTestTypeDef: @@ -423,3 +443,4 @@ def f(): return x assert f.__closure__[0].cell_contents is x + diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -124,22 +124,23 @@ def _getusercls(config, cls, wants_del, reallywantdict=False): from rpython.rlib import objectmodel + from pypy.objspace.std.objectobject import 
W_ObjectObject + from pypy.module.__builtin__.interp_classobj import W_InstanceObject from pypy.objspace.std.mapdict import (BaseUserClassMapdict, - MapdictDictSupport, MapdictWeakrefSupport, - _make_storage_mixin_size_n) + MapdictDictSupport, + _make_storage_mixin_size_n, MapdictStorageMixin) typedef = cls.typedef name = cls.__name__ + "User" - mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()] + mixins_needed = [BaseUserClassMapdict] + if cls is W_ObjectObject or cls is W_InstanceObject: + mixins_needed.append(_make_storage_mixin_size_n()) + else: + mixins_needed.append(MapdictStorageMixin) if reallywantdict or not typedef.hasdict: # the type has no dict, mapdict to provide the dict mixins_needed.append(MapdictDictSupport) name += "Dict" - if not typedef.weakrefable: - # the type does not support weakrefs yet, mapdict to provide weakref - # support - mixins_needed.append(MapdictWeakrefSupport) - name += "Weakrefable" if wants_del: name += "Del" parent_destructor = getattr(cls, '__del__', None) diff --git a/pypy/objspace/std/basemapdictobject.py b/pypy/objspace/std/basemapdictobject.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/basemapdictobject.py @@ -0,0 +1,192 @@ +# this file contains the shared objspace method implementation that are +# imported into W_Root. All W_Root objects have these methods, but most of them +# really only make sense for user-defined subclasses. It is however important +# that they are shared by all subclasses of W_Root. + + +DICT = 0 +SPECIAL = 1 +INVALID = 2 +SLOTS_STARTING_FROM = 3 + + +class RootObjectMapdictMixin(object): + # hooks that the mapdict implementations needs. 
+ # these will be overridden in user-defined subclasses + + def _get_mapdict_map(self): + # if this method returns None, there is no map, thus the class is no + # user-defined subclass + return None + + def _set_mapdict_map(self, map): + raise NotImplementedError + + def _mapdict_read_storage(self, index): + raise NotImplementedError + + def _mapdict_write_storage(self, index, value): + raise NotImplementedError + + def _mapdict_storage_length(self): + raise NotImplementedError + + def _set_mapdict_storage_and_map(self, storage, map): + raise NotImplementedError + + def _mapdict_init_empty(self, map): + raise NotImplementedError + + # ____________________________________________________________ + # objspace interface + + + # class handling + + # getclass is not done here, it makes sense to really specialize this per class + + def setclass(self, space, w_cls): + from pypy.interpreter.error import OperationError + map = self._get_mapdict_map() + if map is None: + raise OperationError(space.w_TypeError, + space.wrap("__class__ assignment: only for heap types")) + new_obj = map.set_terminator(self, w_cls.terminator) + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) + + + # dict handling + + # getdictvalue and setdictvalue are not done here, for performance reasons + + def deldictvalue(self, space, attrname): + from pypy.interpreter.error import OperationError + map = self._get_mapdict_map() + if map is None: + # check whether it has a dict and use that + w_dict = self.getdict(space) + if w_dict is not None: + try: + space.delitem(w_dict, space.wrap(attrname)) + return True + except OperationError, ex: + if not ex.match(space, space.w_KeyError): + raise + return False + new_obj = map.delete(self, attrname, DICT) + if new_obj is None: + return False + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) + return True + + def getdict(self, space): + from pypy.objspace.std.mapdict import MapDictStrategy + from pypy.objspace.std.dictmultiobject 
import W_DictMultiObject + from pypy.objspace.std.dictmultiobject import W_DictObject + map = self._get_mapdict_map() + if map is None: + return None + terminator = map.terminator + if not terminator.has_dict: + return None + w_dict = map.read(self, "dict", SPECIAL) + if w_dict is not None: + assert isinstance(w_dict, W_DictMultiObject) + return w_dict + + strategy = space.fromcache(MapDictStrategy) + storage = strategy.erase(self) + w_dict = W_DictObject(space, strategy, storage) + flag = map.write(self, "dict", SPECIAL, w_dict) + assert flag + return w_dict + + def setdict(self, space, w_dict): + from pypy.interpreter.error import OperationError, oefmt + from pypy.objspace.std.mapdict import MapDictStrategy + from pypy.objspace.std.dictmultiobject import W_DictMultiObject + map = self._get_mapdict_map() + if map is None or not map.terminator.has_dict: + raise oefmt(space.w_TypeError, + "attribute '__dict__' of %T objects is not writable", + self) + terminator = map.terminator + if not space.isinstance_w(w_dict, space.w_dict): + raise OperationError(space.w_TypeError, + space.wrap("setting dictionary to a non-dict")) + assert isinstance(w_dict, W_DictMultiObject) + w_olddict = self.getdict(space) + assert isinstance(w_olddict, W_DictMultiObject) + # The old dict has got 'self' as dstorage, but we are about to + # change self's ("dict", SPECIAL) attribute to point to the + # new dict. If the old dict was using the MapDictStrategy, we + # have to force it now: otherwise it would remain an empty + # shell that continues to delegate to 'self'. 
+ if type(w_olddict.get_strategy()) is MapDictStrategy: + w_olddict.get_strategy().switch_to_object_strategy(w_olddict) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) + assert flag + + + # slots + + def getslotvalue(self, slotindex): + map = self._get_mapdict_map() + if map is None: + # not a user-defined subclass + raise NotImplementedError + index = SLOTS_STARTING_FROM + slotindex + return map.read(self, "slot", index) + + def setslotvalue(self, slotindex, w_value): + map = self._get_mapdict_map() + if map is None: + # not a user-defined subclass + raise NotImplementedError + index = SLOTS_STARTING_FROM + slotindex + map.write(self, "slot", index, w_value) + + def delslotvalue(self, slotindex): + map = self._get_mapdict_map() + if map is None: + # not a user-defined subclass + raise NotImplementedError + index = SLOTS_STARTING_FROM + slotindex + new_obj = map.delete(self, "slot", index) + if new_obj is None: + return False + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) + return True + + + # weakrefs + + def getweakref(self): + from pypy.module._weakref.interp__weakref import WeakrefLifeline + map = self._get_mapdict_map() + if map is None: + return None # not a user-defined subclass + lifeline = map.read(self, "weakref", SPECIAL) + if lifeline is None: + return None + assert isinstance(lifeline, WeakrefLifeline) + return lifeline + getweakref._cannot_really_call_random_things_ = True + + def setweakref(self, space, weakreflifeline): + from pypy.module._weakref.interp__weakref import WeakrefLifeline + map = self._get_mapdict_map() + if map is None: + # not a user-defined subclass + raise oefmt(space.w_TypeError, + "cannot create weak reference to '%T' object", self) + assert isinstance(weakreflifeline, WeakrefLifeline) + map.write(self, "weakref", SPECIAL, weakreflifeline) + setweakref._cannot_really_call_random_things_ = True + + def delweakref(self): + map = self._get_mapdict_map() + if map is None: + return + 
map.write(self, "weakref", SPECIAL, None) + delweakref._cannot_really_call_random_things_ = True diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -10,6 +10,8 @@ W_DictObject, ) from pypy.objspace.std.typeobject import MutableCell +from pypy.objspace.std.basemapdictobject import (DICT, SPECIAL, + SLOTS_STARTING_FROM, INVALID) erase_item, unerase_item = rerased.new_erasing_pair("mapdict storage item") @@ -277,7 +279,7 @@ def copy(self, obj): result = Object() result.space = self.space - result._init_empty(self) + result._mapdict_init_empty(self) return result def length(self): @@ -286,7 +288,7 @@ def set_terminator(self, obj, terminator): result = Object() result.space = self.space - result._init_empty(terminator) + result._mapdict_init_empty(terminator) return result def remove_dict_entries(self, obj): @@ -297,6 +299,9 @@ class DictTerminator(Terminator): _immutable_fields_ = ['devolved_dict_terminator'] + + has_dict = True + def __init__(self, space, w_cls): Terminator.__init__(self, space, w_cls) self.devolved_dict_terminator = DevolvedDictTerminator(space, w_cls) @@ -304,11 +309,13 @@ def materialize_r_dict(self, space, obj, dict_w): result = Object() result.space = space - result._init_empty(self.devolved_dict_terminator) + result._mapdict_init_empty(self.devolved_dict_terminator) return result class NoDictTerminator(Terminator): + has_dict = False + def _write_terminator(self, obj, name, index, w_value): if index == DICT: return False @@ -316,6 +323,8 @@ class DevolvedDictTerminator(Terminator): + has_dict = True + def _read_terminator(self, obj, name, index): if index == DICT: space = self.space @@ -417,11 +426,6 @@ def __repr__(self): return "" % (self.name, self.index, self.storageindex, self.back) -def _become(w_obj, new_obj): - # this is like the _become method, really, but we cannot use that due to - # RPython reasons - 
w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) - class MapAttrCache(object): def __init__(self, space): SIZE = 1 << space.config.objspace.std.methodcachesizeexp @@ -445,11 +449,6 @@ # ____________________________________________________________ # object implementation -DICT = 0 -SPECIAL = 1 -INVALID = 2 -SLOTS_STARTING_FROM = 3 - # a little bit of a mess of mixin classes that implement various pieces of # objspace user object functionality in terms of mapdict @@ -457,16 +456,13 @@ # everything that's needed to use mapdict for a user subclass at all. # This immediately makes slots possible. - # assumes presence of _init_empty, _mapdict_read_storage, + # assumes presence of _mapdict_init_empty, _mapdict_read_storage, # _mapdict_write_storage, _mapdict_storage_length, # _set_mapdict_storage_and_map # _____________________________________________ # methods needed for mapdict - def _become(self, new_obj): - self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) - def _get_mapdict_map(self): return jit.promote(self.map) def _set_mapdict_map(self, map): @@ -480,59 +476,13 @@ def getclass(self, space): return self._get_mapdict_map().terminator.w_cls - def setclass(self, space, w_cls): - new_obj = self._get_mapdict_map().set_terminator(self, w_cls.terminator) - self._become(new_obj) - def user_setup(self, space, w_subtype): from pypy.module.__builtin__.interp_classobj import W_InstanceObject self.space = space assert (not self.typedef.hasdict or isinstance(w_subtype.terminator, NoDictTerminator) or self.typedef is W_InstanceObject.typedef) - self._init_empty(w_subtype.terminator) - - - # methods needed for slots - - def getslotvalue(self, slotindex): - index = SLOTS_STARTING_FROM + slotindex - return self._get_mapdict_map().read(self, "slot", index) - - def setslotvalue(self, slotindex, w_value): - index = SLOTS_STARTING_FROM + slotindex - self._get_mapdict_map().write(self, "slot", index, w_value) - - def delslotvalue(self, slotindex): - index = 
SLOTS_STARTING_FROM + slotindex - new_obj = self._get_mapdict_map().delete(self, "slot", index) - if new_obj is None: - return False - self._become(new_obj) - return True - - -class MapdictWeakrefSupport(object): - # stuff used by the _weakref implementation - - def getweakref(self): - from pypy.module._weakref.interp__weakref import WeakrefLifeline - lifeline = self._get_mapdict_map().read(self, "weakref", SPECIAL) - if lifeline is None: - return None - assert isinstance(lifeline, WeakrefLifeline) - return lifeline - getweakref._cannot_really_call_random_things_ = True - - def setweakref(self, space, weakreflifeline): - from pypy.module._weakref.interp__weakref import WeakrefLifeline - assert isinstance(weakreflifeline, WeakrefLifeline) - self._get_mapdict_map().write(self, "weakref", SPECIAL, weakreflifeline) - setweakref._cannot_really_call_random_things_ = True - - def delweakref(self): - self._get_mapdict_map().write(self, "weakref", SPECIAL, None) - delweakref._cannot_really_call_random_things_ = True + self._mapdict_init_empty(w_subtype.terminator) class MapdictDictSupport(object): @@ -545,61 +495,9 @@ def setdictvalue(self, space, attrname, w_value): return self._get_mapdict_map().write(self, attrname, DICT, w_value) - def deldictvalue(self, space, attrname): - new_obj = self._get_mapdict_map().delete(self, attrname, DICT) - if new_obj is None: - return False - self._become(new_obj) - return True - - def getdict(self, space): - return _obj_getdict(self, space) - - def setdict(self, space, w_dict): - _obj_setdict(self, space, w_dict) - -# a couple of helpers for the classes above, factored out to reduce -# the translated code size - - at objectmodel.dont_inline -def _obj_getdict(self, space): - terminator = self._get_mapdict_map().terminator - assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator) - w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) - if w_dict is not None: - assert isinstance(w_dict, 
W_DictMultiObject) - return w_dict - - strategy = space.fromcache(MapDictStrategy) - storage = strategy.erase(self) - w_dict = W_DictObject(space, strategy, storage) - flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) - assert flag - return w_dict - - at objectmodel.dont_inline -def _obj_setdict(self, space, w_dict): - from pypy.interpreter.error import OperationError - terminator = self._get_mapdict_map().terminator - assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator) - if not space.isinstance_w(w_dict, space.w_dict): - raise OperationError(space.w_TypeError, - space.wrap("setting dictionary to a non-dict")) - assert isinstance(w_dict, W_DictMultiObject) - w_olddict = self.getdict(space) - assert isinstance(w_olddict, W_DictMultiObject) - # The old dict has got 'self' as dstorage, but we are about to - # change self's ("dict", SPECIAL) attribute to point to the - # new dict. If the old dict was using the MapDictStrategy, we - # have to force it now: otherwise it would remain an empty - # shell that continues to delegate to 'self'. 
- if type(w_olddict.get_strategy()) is MapDictStrategy: - w_olddict.get_strategy().switch_to_object_strategy(w_olddict) - flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) - assert flag class MapdictStorageMixin(object): - def _init_empty(self, map): + def _mapdict_init_empty(self, map): from rpython.rlib.debug import make_sure_not_resized self.map = map self.storage = make_sure_not_resized([None] * map.size_estimate()) @@ -622,7 +520,6 @@ objectmodel.import_from_mixin(MapdictStorageMixin) objectmodel.import_from_mixin(BaseUserClassMapdict) - objectmodel.import_from_mixin(MapdictWeakrefSupport) class Object(W_Root): @@ -630,7 +527,6 @@ objectmodel.import_from_mixin(MapdictStorageMixin) objectmodel.import_from_mixin(BaseUserClassMapdict) - objectmodel.import_from_mixin(MapdictWeakrefSupport) objectmodel.import_from_mixin(MapdictDictSupport) @@ -643,7 +539,7 @@ rangenmin1 = unroll.unrolling_iterable(range(nmin1)) valnmin1 = "_value%s" % nmin1 class subcls(object): - def _init_empty(self, map): + def _mapdict_init_empty(self, map): for i in rangenmin1: setattr(self, "_value%s" % i, None) setattr(self, valnmin1, erase_item(None)) @@ -731,7 +627,7 @@ def get_empty_storage(self): w_result = Object() terminator = self.space.fromcache(get_terminator_for_dicts) - w_result._init_empty(terminator) + w_result._mapdict_init_empty(terminator) return self.erase(w_result) def switch_to_object_strategy(self, w_dict): @@ -811,7 +707,7 @@ def clear(self, w_dict): w_obj = self.unerase(w_dict.dstorage) new_obj = w_obj._get_mapdict_map().remove_dict_entries(w_obj) - _become(w_obj, new_obj) + w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) def popitem(self, w_dict): curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT) @@ -833,10 +729,10 @@ return MapDictIteratorItems(self.space, self, w_dict) -def materialize_r_dict(space, obj, dict_w): - map = obj._get_mapdict_map() - new_obj = map.materialize_r_dict(space, obj, dict_w) - _become(obj, 
new_obj) +def materialize_r_dict(space, w_obj, dict_w): + map = w_obj._get_mapdict_map() + new_obj = map.materialize_r_dict(space, w_obj, dict_w) + w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) class MapDictIteratorKeys(BaseKeyIterator): def __init__(self, space, strategy, dictimplementation): From pypy.commits at gmail.com Wed Apr 27 12:11:07 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 27 Apr 2016 09:11:07 -0700 (PDT) Subject: [pypy-commit] pypy default: Remove unnecessary import, which may trigger an unwanted early import of 'types' Message-ID: <5720e49b.4412c30a.fec01.ffffae93@mx.google.com> Author: Ronan Lamy Branch: Changeset: r83991:3d4128d02a5e Date: 2016-04-27 17:10 +0100 http://bitbucket.org/pypy/pypy/changeset/3d4128d02a5e/ Log: Remove unnecessary import, which may trigger an unwanted early import of 'types' diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -5,7 +5,6 @@ equivalent to x+y. ''' -import types import __pypy__ From pypy.commits at gmail.com Wed Apr 27 12:31:39 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 27 Apr 2016 09:31:39 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: add a draft blog post Message-ID: <5720e96b.aa5ec20a.71e38.ffffb09f@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r5637:2d7ef522bdea Date: 2016-04-27 19:29 +0300 http://bitbucket.org/pypy/extradoc/changeset/2d7ef522bdea/ Log: add a draft blog post diff --git a/blog/draft/cpyext-ext.rst b/blog/draft/cpyext-ext.rst new file mode 100644 --- /dev/null +++ b/blog/draft/cpyext-ext.rst @@ -0,0 +1,61 @@ + +C-API Support update Part II +============================ + +We have been working on an upgrade to our C-API support (in the cpyext module), +and have merged the second piece of the puzzle to the PyPy trunk. 
The first +piece was an overhauling of the way refcounted PyObjects are reflected in +interpreter level objects, and was completed in time for PyPy 5.0 (here_ is +the relevant blog post). The current upgrade focused on implementing all the +necessary C-API functions to build and run numpy from upstream code. We are +pleased to report we have come close to succeeding, using our fork_ of the +numpy repo (which introduces only very minor changes) we can now pass over +90% of the numpy test suite. We still fail over 400 of the 5900 tests, but +much of numpy is usable as-is. As always with cpyext, it is expensive to call +into the C-API, we would love to hear how you are using it and what the +performance is in real-life applications. We recommend using PyPy in a +virtualenv_, instructions for getting started are on the forked repo's README. + +.. _here: http://morepypy.blogspot.com/2016/02/c-api-support-update.html +.. _fork: https://github.com/pypy/numpy +.. _virtualenv: https://virtualenv.pypa.io + +What Changed +============ + +PyPy relies on test-driven development. We can now write c-code snippets that +exercise the C-API, and test them on a host python system (such as CPython or +post-translation PyPy) or as non-translated tests. This greatly enhanced our +ability to compare and contrast CPython's behaviour with PyPy's. + +We then were able to test and fix edge cases in the C-API and add missing +functions. We improved threading support through the C-API. All function slots +should now be filled, and type inheritance should also now be more fully +supported. We created a list strategy specifically for PyObjects for fast +access via C. 
+ +Here is a more complete list of the changed functionality in the cpyext-ext branch: + + - allow c-snippet tests to be run with -A so we can verify we are compatible + - fix many edge cases exposed by fixing tests to run with -A + - issequence() logic matches cpython + - make PyStringObject and PyUnicodeObject field names compatible with cpython + - add preliminary support for PyDateTime_* + - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy, + PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile, + - PyAnySet_CheckExact, PyUnicode_Concat + - improve support for PyGILState_Ensure, PyGILState_Release, and thread + primitives, also find a case where CPython will allow thread creation + before PyEval_InitThreads is run, disallow on PyPy + - create a PyObject-specific list strategy + - rewrite slot assignment for typeobjects + - improve tracking of PyObject to rpython object mapping + - support tp_as_{number, sequence, mapping, buffer} slots + +Wait a minute, I thought PyPy already had numpy? +================================================ +XXX plans for the future + +Please try it out and let us know how it works for you + +The PyPy Team From pypy.commits at gmail.com Wed Apr 27 12:56:10 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Apr 2016 09:56:10 -0700 (PDT) Subject: [pypy-commit] pypy default: Issue #2284: fix urls Message-ID: <5720ef2a.89cbc20a.a5dd1.ffffbbb9@mx.google.com> Author: Armin Rigo Branch: Changeset: r83992:9d38316c09dc Date: 2016-04-27 18:56 +0200 http://bitbucket.org/pypy/pypy/changeset/9d38316c09dc/ Log: Issue #2284: fix urls diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -12,9 +12,9 @@ The work on the cling backend has so far been done only for CPython, but bringing it to PyPy is a lot less work than developing it in the first place. -.. _Reflex: http://root.cern.ch/drupal/content/reflex -.. _CINT: http://root.cern.ch/drupal/content/cint -.. 
_cling: http://root.cern.ch/drupal/content/cling +.. _Reflex: https://root.cern.ch/how/how-use-reflex +.. _CINT: https://root.cern.ch/introduction-cint +.. _cling: https://root.cern.ch/cling .. _llvm: http://llvm.org/ .. _clang: http://clang.llvm.org/ @@ -283,7 +283,8 @@ core reflection set, but for the moment assume we want to have it in the reflection library that we are building for this example. -The ``genreflex`` script can be steered using a so-called `selection file`_, +The ``genreflex`` script can be steered using a so-called `selection file`_ +(see "Generating Reflex Dictionaries") which is a simple XML file specifying, either explicitly or by using a pattern, which classes, variables, namespaces, etc. to select from the given header file. @@ -305,7 +306,7 @@ -.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries +.. _selection file: https://root.cern.ch/how/how-use-reflex Now the reflection info can be generated and compiled:: @@ -811,7 +812,7 @@ immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment variable. -.. _PyROOT: http://root.cern.ch/drupal/content/pyroot +.. _PyROOT: https://root.cern.ch/pyroot There are a couple of minor differences between PyCintex and cppyy, most to do with naming. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -79,7 +79,7 @@ :doc:`Full details ` are `available here `. .. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 -.. _Reflex: http://root.cern.ch/drupal/content/reflex +.. 
_Reflex: https://root.cern.ch/how/how-use-reflex RPython Mixed Modules From pypy.commits at gmail.com Wed Apr 27 13:35:51 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Apr 2016 10:35:51 -0700 (PDT) Subject: [pypy-commit] pypy remove-raisingops: Revert the changes in flatten.py, and handle it by jtransform'ing Message-ID: <5720f877.c653c20a.7967e.ffffcdf9@mx.google.com> Author: Armin Rigo Branch: remove-raisingops Changeset: r83993:36d247e2bc68 Date: 2016-04-27 19:35 +0200 http://bitbucket.org/pypy/pypy/changeset/36d247e2bc68/ Log: Revert the changes in flatten.py, and handle it by jtransform'ing the graphs with calls to ll_int_add_ovf to graphs with 'int_add_ovf' (which are then handled by flatten.py) diff --git a/rpython/jit/codewriter/flatten.py b/rpython/jit/codewriter/flatten.py --- a/rpython/jit/codewriter/flatten.py +++ b/rpython/jit/codewriter/flatten.py @@ -103,7 +103,7 @@ self.seen_blocks = {} self.make_bytecode_block(self.graph.startblock) - def make_bytecode_block(self, block): + def make_bytecode_block(self, block, handling_ovf=False): if block.exits == (): self.make_return(block.inputargs) return @@ -117,10 +117,15 @@ # operations = block.operations for i, op in enumerate(operations): - assert '_ovf' not in op.opname # should not exist any more + if '_ovf' in op.opname: + if (len(block.exits) not in (2, 3) or + block.exitswitch is not c_last_exception): + raise Exception("detected a block containing ovfcheck()" + " but no OverflowError is caught, this" + " is not legal in jitted blocks") self.serialize_op(op) # - self.insert_exits(block) + self.insert_exits(block, handling_ovf) def make_return(self, args): if len(args) == 1: @@ -140,16 +145,16 @@ raise Exception("?") self.emitline("---") - def make_link(self, link): + def make_link(self, link, handling_ovf): if (link.target.exits == () and link.last_exception not in link.args and link.last_exc_value not in link.args): self.make_return(link.args) # optimization only return 
self.insert_renamings(link) - self.make_bytecode_block(link.target) + self.make_bytecode_block(link.target, handling_ovf) - def make_exception_link(self, link): + def make_exception_link(self, link, handling_ovf): # Like make_link(), but also introduces the 'last_exception' and # 'last_exc_value' as variables if needed. Also check if the link # is jumping directly to the re-raising exception block. @@ -157,31 +162,52 @@ assert link.last_exc_value is not None if link.target.operations == () and link.args == [link.last_exception, link.last_exc_value]: - self.emitline("reraise") + if handling_ovf: + exc_data = self.cpu.rtyper.exceptiondata + ll_ovf = exc_data.get_standard_ll_exc_instance_by_class( + OverflowError) + c = Constant(ll_ovf, concretetype=lltype.typeOf(ll_ovf)) + self.emitline("raise", c) + else: + self.emitline("reraise") self.emitline("---") return # done - self.make_link(link) + self.make_link(link, handling_ovf) - def insert_exits(self, block): + def insert_exits(self, block, handling_ovf=False): if len(block.exits) == 1: # A single link, fall-through link = block.exits[0] assert link.exitcase in (None, False, True) # the cases False or True should not really occur, but can show # up in the manually hacked graphs for generators... - self.make_link(link) + self.make_link(link, handling_ovf) # elif block.canraise: # An exception block. See test_exc_exitswitch in test_flatten.py # for an example of what kind of code this makes. 
index = -1 opname = block.operations[index].opname - assert '_ovf' not in opname # should not exist any more - while True: - lastopname = block.operations[index].opname - if lastopname != '-live-': - break - index -= 1 + if '_ovf' in opname: + # ovf checking operation as a lat thing, -live- should be + # one before it + line = self.popline() + self.emitline(opname[:7] + '_jump_if_ovf', + TLabel(block.exits[1]), *line[1:]) + assert len(block.exits) in (2, 3) + self.make_link(block.exits[0], False) + self.emitline(Label(block.exits[1])) + self.make_exception_link(block.exits[1], True) + if len(block.exits) == 3: + assert block.exits[2].exitcase is Exception + self.make_exception_link(block.exits[2], False) + return + else: + while True: + lastopname = block.operations[index].opname + if lastopname != '-live-': + break + index -= 1 assert block.exits[0].exitcase is None # is this always True? # if not self._include_all_exc_links: @@ -235,10 +261,10 @@ #if not livebefore: # self.emitline('-live-', TLabel(linkfalse)) # true path: - self.make_link(linktrue) + self.make_link(linktrue, handling_ovf) # false path: self.emitline(Label(linkfalse)) - self.make_link(linkfalse) + self.make_link(linkfalse, handling_ovf) # else: # A switch. @@ -261,7 +287,7 @@ switchdict) # emit the default path if block.exits[-1].exitcase == 'default': - self.make_link(block.exits[-1]) + self.make_link(block.exits[-1], handling_ovf) else: self.emitline("unreachable") self.emitline("---") @@ -275,7 +301,7 @@ # if the switched value doesn't match any case. 
self.emitline(Label(switch)) self.emitline('-live-') - self.make_link(switch) + self.make_link(switch, handling_ovf) def insert_renamings(self, link): renamings = {} diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -425,6 +425,8 @@ # dispatch to various implementations depending on the oopspec_name if oopspec_name.startswith('list.') or oopspec_name.startswith('newlist'): prepare = self._handle_list_call + elif oopspec_name.startswith('int.'): + prepare = self._handle_int_ovf elif oopspec_name.startswith('stroruni.'): prepare = self._handle_stroruni_call elif oopspec_name == 'str.str2unicode': @@ -1900,6 +1902,14 @@ llmemory.cast_ptr_to_adr(c_func.value)) self.callcontrol.callinfocollection.add(oopspecindex, calldescr, func) + def _handle_int_ovf(self, op, oopspec_name, args): + assert oopspec_name in ('int.add_ovf', 'int.sub_ovf', 'int.mul_ovf') + op0 = SpaceOperation(oopspec_name.replace('.', '_'), args, op.result) + if oopspec_name != 'int.sub_ovf': + op0 = self._rewrite_symmetric(op0) + oplive = SpaceOperation('-live-', [], None) + return [oplive, op0] + def _handle_stroruni_call(self, op, oopspec_name, args): SoU = args[0].concretetype # Ptr(STR) or Ptr(UNICODE) can_raise_memoryerror = { diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -71,6 +71,9 @@ _descr_cannot_raise = FakeDescr() callinfocollection = FakeCallInfoCollection() def guess_call_kind(self, op): + if op.args[0].value._obj._name.startswith( + ('ll_int_add_ovf', 'll_int_sub_ovf', 'll_int_mul_ovf')): + return 'builtin' return 'residual' def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, extraeffect=None, extradescr=None): @@ -478,7 +481,7 @@ except ZeroDivisionError: return -42 self.encoding_test(f, [7, 2], 
""" - residual_call_ir_i $<* fn int_floordiv_ovf_zer>, I[%i0, %i1], R[], -> %i2 + residual_call_ir_i $<* fn ll_int_floordiv_ovf_zer__Signed_Signed>, I[%i0, %i1], R[], -> %i2 -live- catch_exception L1 int_return %i2 @@ -505,7 +508,7 @@ return 42 # XXX so far, this really produces a int_mod_ovf_zer... self.encoding_test(f, [7, 2], """ - residual_call_ir_i $<* fn int_mod_ovf_zer>, I[%i0, %i1], R[], -> %i2 + residual_call_ir_i $<* fn ll_int_mod_ovf_zer__Signed_Signed>, I[%i0, %i1], R[], -> %i2 -live- catch_exception L1 int_return %i2 @@ -548,6 +551,36 @@ int_return $42 """, transform=True, liveness=True) + def test_int_sub_ovf(self): + def f(i, j): + try: + return ovfcheck(i - j) + except OverflowError: + return 42 + self.encoding_test(f, [7, 2], """ + -live- %i0, %i1 + int_sub_jump_if_ovf L1, %i0, %i1 -> %i2 + int_return %i2 + --- + L1: + int_return $42 + """, transform=True, liveness=True) + + def test_int_mul_ovf(self): + def f(i, j): + try: + return ovfcheck(i * j) + except OverflowError: + return 42 + self.encoding_test(f, [7, 2], """ + -live- %i0, %i1 + int_mul_jump_if_ovf L1, %i0, %i1 -> %i2 + int_return %i2 + --- + L1: + int_return $42 + """, transform=True, liveness=True) + def test_multiple_int_add_ovf(self): def f(i, j): try: diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -378,7 +378,6 @@ LLONG_BITS_1 = r_longlong.BITS - 1 LLLONG_BITS_1 = r_longlonglong.BITS - 1 INT_MIN = int(-(1 << INT_BITS_1)) -LLONG_MIN = r_longlong(-(1 << LLONG_BITS_1)) # ---------- floordiv ---------- @@ -505,35 +504,28 @@ # ---------- add, sub, mul ---------- - at jit.oopspec("add_ovf") + at jit.oopspec("int.add_ovf(x, y)") def ll_int_add_ovf(x, y): r = intmask(r_uint(x) + r_uint(y)) if r^x < 0 and r^y < 0: raise OverflowError("integer addition") return r - at jit.oopspec("add_ovf") + at jit.oopspec("int.add_ovf(x, y)") def ll_int_add_nonneg_ovf(x, y): # y can be assumed >= 0 r = intmask(r_uint(x) + r_uint(y)) 
if r < x: raise OverflowError("integer addition") return r - at jit.oopspec("sub_ovf") + at jit.oopspec("int.sub_ovf(x, y)") def ll_int_sub_ovf(x, y): r = intmask(r_uint(x) - r_uint(y)) if r^x < 0 and r^~y < 0: raise OverflowError("integer subtraction") return r - at jit.oopspec("sub_ovf") -def ll_llong_sub_ovf(x, y): - r = longlongmask(r_ulonglong(x) - r_ulonglong(y)) - if r^x < 0 and r^~y < 0: - raise OverflowError("longlong subtraction") - return r - - at jit.oopspec("mul_ovf") + at jit.oopspec("int.mul_ovf(a, b)") def ll_int_mul_ovf(a, b): if INT_BITS_1 < LLONG_BITS_1: rr = r_longlong(a) * r_longlong(b) @@ -578,23 +570,11 @@ raise OverflowError return -x -def ll_llong_neg_ovf(x): - if jit.we_are_jitted(): - return ll_llong_sub_ovf(0, x) - if x == LLONG_MIN: - raise OverflowError - return -x - def ll_int_abs_ovf(x): if x == INT_MIN: raise OverflowError return abs(x) -def ll_llong_abs_ovf(x): - if x == LLONG_MIN: - raise OverflowError - return abs(x) - #Helper functions for comparisons From pypy.commits at gmail.com Wed Apr 27 14:55:57 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 27 Apr 2016 11:55:57 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: Catch SyntaxError on test_resource.py so that the test is skipped when trying to run py3 code on py2 Message-ID: <57210b3d.d5da1c0a.89c17.7263@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r83995:a07e00918a68 Date: 2016-04-27 19:46 +0100 http://bitbucket.org/pypy/pypy/changeset/a07e00918a68/ Log: Catch SyntaxError on test_resource.py so that the test is skipped when trying to run py3 code on py2 diff --git a/pypy/module/test_lib_pypy/test_resource.py b/pypy/module/test_lib_pypy/test_resource.py --- a/pypy/module/test_lib_pypy/test_resource.py +++ b/pypy/module/test_lib_pypy/test_resource.py @@ -6,7 +6,7 @@ try: from lib_pypy import resource -except ImportError as e: +except (ImportError, SyntaxError) as e: skip(str(e)) From pypy.commits at gmail.com Wed Apr 27 14:55:55 2016 From: 
pypy.commits at gmail.com (rlamy) Date: Wed, 27 Apr 2016 11:55:55 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: hg merge 73a49ec9edc3 Message-ID: <57210b3b.c9b0c20a.e1f64.ffffebb1@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r83994:4363df660a6b Date: 2016-04-27 18:52 +0100 http://bitbucket.org/pypy/pypy/changeset/4363df660a6b/ Log: hg merge 73a49ec9edc3 diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py --- a/lib_pypy/_collections.py +++ b/lib_pypy/_collections.py @@ -320,8 +320,7 @@ def __reduce_ex__(self, proto): return type(self), (list(self), self.maxlen) - def __hash__(self): - raise TypeError("deque objects are unhashable") + __hash__ = None def __copy__(self): return self.__class__(self, self.maxlen) diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -1,51 +1,22 @@ -from resource import _struct_rusage, struct_rusage -from ctypes import CDLL, c_int, POINTER, byref -from ctypes.util import find_library +from resource import ffi, lib, _make_struct_rusage __all__ = ["wait3", "wait4"] -libc = CDLL(find_library("c")) -c_wait3 = libc.wait3 -c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)] -c_wait3.restype = c_int - -c_wait4 = libc.wait4 -c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)] -c_wait4.restype = c_int - -def create_struct_rusage(c_struct): - return struct_rusage(( - float(c_struct.ru_utime), - float(c_struct.ru_stime), - c_struct.ru_maxrss, - c_struct.ru_ixrss, - c_struct.ru_idrss, - c_struct.ru_isrss, - c_struct.ru_minflt, - c_struct.ru_majflt, - c_struct.ru_nswap, - c_struct.ru_inblock, - c_struct.ru_oublock, - c_struct.ru_msgsnd, - c_struct.ru_msgrcv, - c_struct.ru_nsignals, - c_struct.ru_nvcsw, - c_struct.ru_nivcsw)) def wait3(options): - status = c_int() - _rusage = _struct_rusage() - pid = c_wait3(byref(status), c_int(options), byref(_rusage)) + status = ffi.new("int *") + ru = ffi.new("struct 
rusage *") + pid = lib.wait3(status, options, ru) - rusage = create_struct_rusage(_rusage) + rusage = _make_struct_rusage(ru) - return pid, status.value, rusage + return pid, status[0], rusage def wait4(pid, options): - status = c_int() - _rusage = _struct_rusage() - pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage)) + status = ffi.new("int *") + ru = ffi.new("struct rusage *") + pid = lib.wait4(pid, status, options, ru) - rusage = create_struct_rusage(_rusage) + rusage = _make_struct_rusage(ru) - return pid, status.value, rusage + return pid, status[0], rusage diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_resource_build.py @@ -0,0 +1,118 @@ +from cffi import FFI + +ffi = FFI() + +# Note: we don't directly expose 'struct timeval' or 'struct rlimit' + + +rlimit_consts = ''' +RLIMIT_CPU +RLIMIT_FSIZE +RLIMIT_DATA +RLIMIT_STACK +RLIMIT_CORE +RLIMIT_NOFILE +RLIMIT_OFILE +RLIMIT_VMEM +RLIMIT_AS +RLIMIT_RSS +RLIMIT_NPROC +RLIMIT_MEMLOCK +RLIMIT_SBSIZE +RLIM_INFINITY +RUSAGE_SELF +RUSAGE_CHILDREN +RUSAGE_BOTH +'''.split() + +rlimit_consts = ['#ifdef %s\n\t{"%s", %s},\n#endif\n' % (s, s, s) + for s in rlimit_consts] + + +ffi.set_source("_resource_cffi", """ +#include +#include +#include +#include + +static const struct my_rlimit_def { + const char *name; + long long value; +} my_rlimit_consts[] = { +$RLIMIT_CONSTS + { NULL, 0 } +}; + +#define doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001) + +static double my_utime(struct rusage *input) +{ + return doubletime(input->ru_utime); +} + +static double my_stime(struct rusage *input) +{ + return doubletime(input->ru_stime); +} + +static int my_getrlimit(int resource, long long result[2]) +{ + struct rlimit rl; + if (getrlimit(resource, &rl) == -1) + return -1; + result[0] = rl.rlim_cur; + result[1] = rl.rlim_max; + return 0; +} + +static int my_setrlimit(int resource, long long cur, long long max) +{ + struct rlimit 
rl; + rl.rlim_cur = cur & RLIM_INFINITY; + rl.rlim_max = max & RLIM_INFINITY; + return setrlimit(resource, &rl); +} + +""".replace('$RLIMIT_CONSTS', ''.join(rlimit_consts))) + + +ffi.cdef(""" + +#define RLIM_NLIMITS ... + +const struct my_rlimit_def { + const char *name; + long long value; +} my_rlimit_consts[]; + +struct rusage { + long ru_maxrss; + long ru_ixrss; + long ru_idrss; + long ru_isrss; + long ru_minflt; + long ru_majflt; + long ru_nswap; + long ru_inblock; + long ru_oublock; + long ru_msgsnd; + long ru_msgrcv; + long ru_nsignals; + long ru_nvcsw; + long ru_nivcsw; + ...; +}; + +static double my_utime(struct rusage *); +static double my_stime(struct rusage *); +void getrusage(int who, struct rusage *result); +int my_getrlimit(int resource, long long result[2]); +int my_setrlimit(int resource, long long cur, long long max); + +int wait3(int *status, int options, struct rusage *rusage); +int wait4(int pid, int *status, int options, struct rusage *rusage); +""") + + +if __name__ == "__main__": + ffi.compile() diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -29,7 +29,8 @@ _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") _r_cdecl = re.compile(r"\b__cdecl\b") -_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.') +_r_extern_python = re.compile(r'\bextern\s*"' + r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.') _r_star_const_space = re.compile( # matches "* const " r"[*]\s*((const|volatile|restrict)\b\s*)+") @@ -88,6 +89,12 @@ # void __cffi_extern_python_start; # int foo(int); # void __cffi_extern_python_stop; + # + # input: `extern "Python+C" int foo(int);` + # output: + # void __cffi_extern_python_plus_c_start; + # int foo(int); + # void __cffi_extern_python_stop; parts = [] while True: match = _r_extern_python.search(csource) @@ -98,7 +105,10 @@ #print ''.join(parts)+csource #print '=>' 
parts.append(csource[:match.start()]) - parts.append('void __cffi_extern_python_start; ') + if 'C' in match.group(1): + parts.append('void __cffi_extern_python_plus_c_start; ') + else: + parts.append('void __cffi_extern_python_start; ') if csource[endpos] == '{': # grouping variant closing = csource.find('}', endpos) @@ -302,7 +312,7 @@ break # try: - self._inside_extern_python = False + self._inside_extern_python = '__cffi_extern_python_stop' for decl in iterator: if isinstance(decl, pycparser.c_ast.Decl): self._parse_decl(decl) @@ -376,8 +386,10 @@ tp = self._get_type_pointer(tp, quals) if self._options.get('dllexport'): tag = 'dllexport_python ' - elif self._inside_extern_python: + elif self._inside_extern_python == '__cffi_extern_python_start': tag = 'extern_python ' + elif self._inside_extern_python == '__cffi_extern_python_plus_c_start': + tag = 'extern_python_plus_c ' else: tag = 'function ' self._declare(tag + decl.name, tp) @@ -421,11 +433,9 @@ # hack: `extern "Python"` in the C source is replaced # with "void __cffi_extern_python_start;" and # "void __cffi_extern_python_stop;" - self._inside_extern_python = not self._inside_extern_python - assert self._inside_extern_python == ( - decl.name == '__cffi_extern_python_start') + self._inside_extern_python = decl.name else: - if self._inside_extern_python: + if self._inside_extern_python !='__cffi_extern_python_stop': raise api.CDefError( "cannot declare constants or " "variables with 'extern \"Python\"'") diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -1145,11 +1145,11 @@ def _generate_cpy_extern_python_collecttype(self, tp, name): assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) + _generate_cpy_dllexport_python_collecttype = \ + _generate_cpy_extern_python_plus_c_collecttype = \ + _generate_cpy_extern_python_collecttype - def _generate_cpy_dllexport_python_collecttype(self, tp, name): - 
self._generate_cpy_extern_python_collecttype(tp, name) - - def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): + def _extern_python_decl(self, tp, name, tag_and_space): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' @@ -1184,11 +1184,7 @@ size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) - if dllexport: - tag = 'CFFI_DLLEXPORT' - else: - tag = 'static' - prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) + prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') @@ -1206,8 +1202,14 @@ prnt() self._num_externpy += 1 + def _generate_cpy_extern_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'static ') + def _generate_cpy_dllexport_python_decl(self, tp, name): - self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ') + + def _generate_cpy_extern_python_plus_c_decl(self, tp, name): + self._extern_python_decl(tp, name, '') def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: @@ -1220,8 +1222,9 @@ self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) - def _generate_cpy_dllexport_python_ctx(self, tp, name): - self._generate_cpy_extern_python_ctx(tp, name) + _generate_cpy_dllexport_python_ctx = \ + _generate_cpy_extern_python_plus_c_ctx = \ + _generate_cpy_extern_python_ctx def _string_literal(self, s): def _char_repr(c): diff --git a/lib_pypy/ctypes_config_cache/.empty b/lib_pypy/ctypes_config_cache/.empty new file mode 100644 --- /dev/null +++ b/lib_pypy/ctypes_config_cache/.empty @@ -0,0 +1,1 @@ +dummy file to allow old buildbot configuration to run diff --git a/lib_pypy/ctypes_config_cache/__init__.py b/lib_pypy/ctypes_config_cache/__init__.py deleted file mode 100644 diff --git 
a/lib_pypy/ctypes_config_cache/dumpcache.py b/lib_pypy/ctypes_config_cache/dumpcache.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/dumpcache.py +++ /dev/null @@ -1,21 +0,0 @@ -import sys, os -from ctypes_configure import dumpcache - -def dumpcache2(basename, config): - size = 32 if sys.maxint <= 2**32 else 64 - filename = '_%s_%s_.py' % (basename, size) - dumpcache.dumpcache(__file__, filename, config) - # - filename = os.path.join(os.path.dirname(__file__), - '_%s_cache.py' % (basename,)) - g = open(filename, 'w') - print >> g, '''\ -import sys -_size = 32 if sys.maxsize <= 2**32 else 64 -# XXX relative import, should be removed together with -# XXX the relative imports done e.g. by lib_pypy/pypy_test/test_hashlib -_mod = __import__("_%s_%%s_" %% (_size,), - globals(), locals(), ["*"], level=1) -globals().update(_mod.__dict__)\ -''' % (basename,) - g.close() diff --git a/lib_pypy/ctypes_config_cache/locale.ctc.py b/lib_pypy/ctypes_config_cache/locale.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/locale.ctc.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -'ctypes_configure' source for _locale.py. -Run this to rebuild _locale_cache.py. 
-""" - -from ctypes_configure.configure import (configure, ExternalCompilationInfo, - ConstantInteger, DefinedConstantInteger, SimpleType, check_eci) -import dumpcache - -# ____________________________________________________________ - -_CONSTANTS = [ - 'LC_CTYPE', - 'LC_TIME', - 'LC_COLLATE', - 'LC_MONETARY', - 'LC_MESSAGES', - 'LC_NUMERIC', - 'LC_ALL', - 'CHAR_MAX', -] - -class LocaleConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['limits.h', - 'locale.h']) -for key in _CONSTANTS: - setattr(LocaleConfigure, key, DefinedConstantInteger(key)) - -config = configure(LocaleConfigure, noerr=True) -for key, value in config.items(): - if value is None: - del config[key] - _CONSTANTS.remove(key) - -# ____________________________________________________________ - -eci = ExternalCompilationInfo(includes=['locale.h', 'langinfo.h']) -HAS_LANGINFO = check_eci(eci) - -if HAS_LANGINFO: - # list of all possible names - langinfo_names = [ - "RADIXCHAR", "THOUSEP", "CRNCYSTR", - "D_T_FMT", "D_FMT", "T_FMT", "AM_STR", "PM_STR", - "CODESET", "T_FMT_AMPM", "ERA", "ERA_D_FMT", "ERA_D_T_FMT", - "ERA_T_FMT", "ALT_DIGITS", "YESEXPR", "NOEXPR", "_DATE_FMT", - ] - for i in range(1, 8): - langinfo_names.append("DAY_%d" % i) - langinfo_names.append("ABDAY_%d" % i) - for i in range(1, 13): - langinfo_names.append("MON_%d" % i) - langinfo_names.append("ABMON_%d" % i) - - class LanginfoConfigure: - _compilation_info_ = eci - nl_item = SimpleType('nl_item') - for key in langinfo_names: - setattr(LanginfoConfigure, key, DefinedConstantInteger(key)) - - langinfo_config = configure(LanginfoConfigure) - for key, value in langinfo_config.items(): - if value is None: - del langinfo_config[key] - langinfo_names.remove(key) - config.update(langinfo_config) - _CONSTANTS += langinfo_names - -# ____________________________________________________________ - -config['ALL_CONSTANTS'] = tuple(_CONSTANTS) -config['HAS_LANGINFO'] = HAS_LANGINFO -dumpcache.dumpcache2('locale', config) diff 
--git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py deleted file mode 100755 --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ /dev/null @@ -1,56 +0,0 @@ -#! /usr/bin/env python -# Run this script to rebuild all caches from the *.ctc.py files. - -import os, sys - -sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))) - -import py - -_dirpath = os.path.dirname(__file__) or os.curdir - -from rpython.tool.ansi_print import AnsiLogger -log = AnsiLogger("ctypes_config_cache") - - -def rebuild_one(name): - filename = os.path.join(_dirpath, name) - d = {'__file__': filename} - path = sys.path[:] - try: - sys.path.insert(0, _dirpath) - execfile(filename, d) - finally: - sys.path[:] = path - -def try_rebuild(): - size = 32 if sys.maxint <= 2**32 else 64 - # remove the files '_*_size_.py' - left = {} - for p in os.listdir(_dirpath): - if p.startswith('_') and (p.endswith('_%s_.py' % size) or - p.endswith('_%s_.pyc' % size)): - os.unlink(os.path.join(_dirpath, p)) - elif p.startswith('_') and (p.endswith('_.py') or - p.endswith('_.pyc')): - for i in range(2, len(p)-4): - left[p[:i]] = True - # remove the files '_*_cache.py' if there is no '_*_*_.py' left around - for p in os.listdir(_dirpath): - if p.startswith('_') and (p.endswith('_cache.py') or - p.endswith('_cache.pyc')): - if p[:-9] not in left: - os.unlink(os.path.join(_dirpath, p)) - # - for p in os.listdir(_dirpath): - if p.endswith('.ctc.py'): - try: - rebuild_one(p) - except Exception, e: - log.ERROR("Running %s:\n %s: %s" % ( - os.path.join(_dirpath, p), - e.__class__.__name__, e)) - - -if __name__ == '__main__': - try_rebuild() diff --git a/lib_pypy/ctypes_config_cache/resource.ctc.py b/lib_pypy/ctypes_config_cache/resource.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/resource.ctc.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -'ctypes_configure' source for resource.py. -Run this to rebuild _resource_cache.py. 
-""" - - -from ctypes import sizeof -import dumpcache -from ctypes_configure.configure import (configure, - ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger, - SimpleType) - - -_CONSTANTS = ( - 'RLIM_INFINITY', - 'RLIM_NLIMITS', -) -_OPTIONAL_CONSTANTS = ( - 'RLIMIT_CPU', - 'RLIMIT_FSIZE', - 'RLIMIT_DATA', - 'RLIMIT_STACK', - 'RLIMIT_CORE', - 'RLIMIT_RSS', - 'RLIMIT_NPROC', - 'RLIMIT_NOFILE', - 'RLIMIT_OFILE', - 'RLIMIT_MEMLOCK', - 'RLIMIT_AS', - 'RLIMIT_LOCKS', - 'RLIMIT_SIGPENDING', - 'RLIMIT_MSGQUEUE', - 'RLIMIT_NICE', - 'RLIMIT_RTPRIO', - 'RLIMIT_VMEM', - - 'RUSAGE_BOTH', - 'RUSAGE_SELF', - 'RUSAGE_CHILDREN', -) - -# Setup our configure -class ResourceConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h']) - rlim_t = SimpleType('rlim_t') -for key in _CONSTANTS: - setattr(ResourceConfigure, key, ConstantInteger(key)) -for key in _OPTIONAL_CONSTANTS: - setattr(ResourceConfigure, key, DefinedConstantInteger(key)) - -# Configure constants and types -config = configure(ResourceConfigure) -config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1 -optional_constants = [] -for key in _OPTIONAL_CONSTANTS: - if config[key] is not None: - optional_constants.append(key) - else: - del config[key] - -config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants) -dumpcache.dumpcache2('resource', config) diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py --- a/lib_pypy/pwd.py +++ b/lib_pypy/pwd.py @@ -1,4 +1,4 @@ -# ctypes implementation: Victor Stinner, 2008-05-08 +# indirectly based on ctypes implementation: Victor Stinner, 2008-05-08 """ This module provides access to the Unix password database. It is available on all Unix versions. 
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py --- a/lib_pypy/resource.py +++ b/lib_pypy/resource.py @@ -1,15 +1,8 @@ -import sys -if sys.platform == 'win32': - raise ImportError('resource module not available for win32') +"""http://docs.python.org/library/resource""" -# load the platform-specific cache made by running resource.ctc.py -from ctypes_config_cache._resource_cache import * - -from ctypes_support import standard_c_lib as libc -from ctypes_support import get_errno -from ctypes import Structure, c_int, c_long, byref, POINTER +from _resource_cffi import ffi, lib from errno import EINVAL, EPERM -import _structseq +import _structseq, os try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f @@ -18,104 +11,37 @@ class error(Exception): pass +class struct_rusage(metaclass=_structseq.structseqtype): + """struct_rusage: Result from getrusage. -# Read required libc functions -_getrusage = libc.getrusage -_getrlimit = libc.getrlimit -_setrlimit = libc.setrlimit -try: - _getpagesize = libc.getpagesize - _getpagesize.argtypes = () - _getpagesize.restype = c_int -except AttributeError: - from os import sysconf - _getpagesize = None +This object may be accessed either as a tuple of + (utime,stime,maxrss,ixrss,idrss,isrss,minflt,majflt, + nswap,inblock,oublock,msgsnd,msgrcv,nsignals,nvcsw,nivcsw) +or via the attributes ru_utime, ru_stime, ru_maxrss, and so on.""" + __metaclass__ = _structseq.structseqtype -class timeval(Structure): - _fields_ = ( - ("tv_sec", c_long), - ("tv_usec", c_long), - ) - def __str__(self): - return "(%s, %s)" % (self.tv_sec, self.tv_usec) + ru_utime = _structseq.structseqfield(0, "user time used") + ru_stime = _structseq.structseqfield(1, "system time used") + ru_maxrss = _structseq.structseqfield(2, "max. 
resident set size") + ru_ixrss = _structseq.structseqfield(3, "shared memory size") + ru_idrss = _structseq.structseqfield(4, "unshared data size") + ru_isrss = _structseq.structseqfield(5, "unshared stack size") + ru_minflt = _structseq.structseqfield(6, "page faults not requiring I/O") + ru_majflt = _structseq.structseqfield(7, "page faults requiring I/O") + ru_nswap = _structseq.structseqfield(8, "number of swap outs") + ru_inblock = _structseq.structseqfield(9, "block input operations") + ru_oublock = _structseq.structseqfield(10, "block output operations") + ru_msgsnd = _structseq.structseqfield(11, "IPC messages sent") + ru_msgrcv = _structseq.structseqfield(12, "IPC messages received") + ru_nsignals = _structseq.structseqfield(13,"signals received") + ru_nvcsw = _structseq.structseqfield(14, "voluntary context switches") + ru_nivcsw = _structseq.structseqfield(15, "involuntary context switches") - def __float__(self): - return self.tv_sec + self.tv_usec/1000000.0 - -class _struct_rusage(Structure): - _fields_ = ( - ("ru_utime", timeval), - ("ru_stime", timeval), - ("ru_maxrss", c_long), - ("ru_ixrss", c_long), - ("ru_idrss", c_long), - ("ru_isrss", c_long), - ("ru_minflt", c_long), - ("ru_majflt", c_long), - ("ru_nswap", c_long), - ("ru_inblock", c_long), - ("ru_oublock", c_long), - ("ru_msgsnd", c_long), - ("ru_msgrcv", c_long), - ("ru_nsignals", c_long), - ("ru_nvcsw", c_long), - ("ru_nivcsw", c_long), - ) - -_getrusage.argtypes = (c_int, POINTER(_struct_rusage)) -_getrusage.restype = c_int - - -class struct_rusage(metaclass=_structseq.structseqtype): - ru_utime = _structseq.structseqfield(0) - ru_stime = _structseq.structseqfield(1) - ru_maxrss = _structseq.structseqfield(2) - ru_ixrss = _structseq.structseqfield(3) - ru_idrss = _structseq.structseqfield(4) - ru_isrss = _structseq.structseqfield(5) - ru_minflt = _structseq.structseqfield(6) - ru_majflt = _structseq.structseqfield(7) - ru_nswap = _structseq.structseqfield(8) - ru_inblock = 
_structseq.structseqfield(9) - ru_oublock = _structseq.structseqfield(10) - ru_msgsnd = _structseq.structseqfield(11) - ru_msgrcv = _structseq.structseqfield(12) - ru_nsignals = _structseq.structseqfield(13) - ru_nvcsw = _structseq.structseqfield(14) - ru_nivcsw = _structseq.structseqfield(15) - - at builtinify -def rlimit_check_bounds(rlim_cur, rlim_max): - if rlim_cur > rlim_t_max: - raise ValueError("%d does not fit into rlim_t" % rlim_cur) - if rlim_max > rlim_t_max: - raise ValueError("%d does not fit into rlim_t" % rlim_max) - -class rlimit(Structure): - _fields_ = ( - ("rlim_cur", rlim_t), - ("rlim_max", rlim_t), - ) - -_getrlimit.argtypes = (c_int, POINTER(rlimit)) -_getrlimit.restype = c_int -_setrlimit.argtypes = (c_int, POINTER(rlimit)) -_setrlimit.restype = c_int - - - at builtinify -def getrusage(who): - ru = _struct_rusage() - ret = _getrusage(who, byref(ru)) - if ret == -1: - errno = get_errno() - if errno == EINVAL: - raise ValueError("invalid who parameter") - raise error(errno) +def _make_struct_rusage(ru): return struct_rusage(( - float(ru.ru_utime), - float(ru.ru_stime), + lib.my_utime(ru), + lib.my_stime(ru), ru.ru_maxrss, ru.ru_ixrss, ru.ru_idrss, @@ -133,48 +59,59 @@ )) @builtinify +def getrusage(who): + ru = ffi.new("struct rusage *") + if lib.getrusage(who, ru) == -1: + if ffi.errno == EINVAL: + raise ValueError("invalid who parameter") + raise error(ffi.errno) + return _make_struct_rusage(ru) + + at builtinify def getrlimit(resource): - if not(0 <= resource < RLIM_NLIMITS): + if not (0 <= resource < lib.RLIM_NLIMITS): return ValueError("invalid resource specified") - rlim = rlimit() - ret = _getrlimit(resource, byref(rlim)) - if ret == -1: - errno = get_errno() - raise error(errno) - return (rlim.rlim_cur, rlim.rlim_max) + result = ffi.new("long long[2]") + if lib.my_getrlimit(resource, result) == -1: + raise error(ffi.errno) + return (result[0], result[1]) @builtinify -def setrlimit(resource, rlim): - if not(0 <= resource < RLIM_NLIMITS): 
+def setrlimit(resource, limits): + if not (0 <= resource < lib.RLIM_NLIMITS): return ValueError("invalid resource specified") - rlimit_check_bounds(*rlim) - rlim = rlimit(rlim[0], rlim[1]) - ret = _setrlimit(resource, byref(rlim)) - if ret == -1: - errno = get_errno() - if errno == EINVAL: - return ValueError("current limit exceeds maximum limit") - elif errno == EPERM: - return ValueError("not allowed to raise maximum limit") + limits = tuple(limits) + if len(limits) != 2: + raise ValueError("expected a tuple of 2 integers") + + if lib.my_setrlimit(resource, limits[0], limits[1]) == -1: + if ffi.errno == EINVAL: + raise ValueError("current limit exceeds maximum limit") + elif ffi.errno == EPERM: + raise ValueError("not allowed to raise maximum limit") else: - raise error(errno) + raise error(ffi.errno) + @builtinify def getpagesize(): - if _getpagesize: - return _getpagesize() - else: - try: - return sysconf("SC_PAGE_SIZE") - except ValueError: - # Irix 5.3 has _SC_PAGESIZE, but not _SC_PAGE_SIZE - return sysconf("SC_PAGESIZE") + return os.sysconf("SC_PAGESIZE") -__all__ = ALL_CONSTANTS + ( - 'error', 'timeval', 'struct_rusage', 'rlimit', - 'getrusage', 'getrlimit', 'setrlimit', 'getpagesize', + +def _setup(): + all_constants = [] + p = lib.my_rlimit_consts + while p.name: + name = ffi.string(p.name) + globals()[name] = int(p.value) + all_constants.append(name) + p += 1 + return all_constants + +__all__ = tuple(_setup()) + ( + 'error', 'getpagesize', 'struct_rusage', + 'getrusage', 'getrlimit', 'setrlimit', ) - -del ALL_CONSTANTS +del _setup diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -344,10 +344,6 @@ return PyPyJitPolicy(pypy_hooks) def get_entry_point(self, config): - from pypy.tool.lib_pypy import import_from_lib_pypy - rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild') - rebuild.try_rebuild() - space = make_objspace(config) # 
manually imports app_main.py diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1353,8 +1353,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 6): - py.test.skip("re-enable me in version 1.6") + if __version_info__ < (1, 7): + py.test.skip("re-enable me in version 1.7") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1511,7 +1511,9 @@ void boz(void); } """) - lib = verify(ffi, 'test_extern_python_1', "") + lib = verify(ffi, 'test_extern_python_1', """ + static void baz(int, int); /* forward */ + """) assert ffi.typeof(lib.bar) == ffi.typeof("int(*)(int, int)") with FdWriteCapture() as f: res = lib.bar(4, 5) @@ -1745,6 +1747,35 @@ assert lib.mycb1(200) == 242 assert lib.indirect_call(300) == 342 +def test_extern_python_plus_c(): + ffi = FFI() + ffi.cdef(""" + extern "Python+C" int foo(int); + extern "C +\tPython" int bar(int); + int call_me(int); + """) + lib = verify(ffi, 'test_extern_python_plus_c', """ + int foo(int); + #ifdef __GNUC__ + __attribute__((visibility("hidden"))) + #endif + int bar(int); + + static int call_me(int x) { + return foo(x) - bar(x); + } + """) + # + @ffi.def_extern() + def foo(x): + return x * 42 + @ffi.def_extern() + def bar(x): + return x * 63 + assert lib.foo(100) == 4200 + assert lib.bar(100) == 6300 + assert lib.call_me(100) == -2100 + def test_introspect_function(): ffi = 
FFI() ffi.cdef("float f1(double);") diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/empty.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/empty.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/empty.py @@ -0,0 +1,11 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api("") + +ffi.set_source("_empty_cffi", "") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -62,6 +62,12 @@ raises(IndexError, d.remove, 'c') assert len(d) == 0 + def test_deque_unhashable(self): + from collections import Hashable + d = self.get_deque() + raises(TypeError, hash, d) + assert not isinstance(d, Hashable) + class AppTestDequeExtra: spaceconfig = dict(usemodules=('binascii', 'struct',)) diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py deleted file mode 100644 --- a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ /dev/null @@ -1,44 +0,0 @@ -import py -import sys, os -from rpython.tool.udir import udir - -dirpath = py.path.local(__file__).dirpath().dirpath().dirpath().dirpath() -dirpath = dirpath.join('lib_pypy').join('ctypes_config_cache') - - -def run(filename, outputname): - filepath = dirpath.join(filename) - tmpdir = udir.ensure('testcache-' + os.path.splitext(filename)[0], - dir=True) - tmpdir.join('dumpcache.py').write(dirpath.join('dumpcache.py').read()) - path = sys.path[:] - sys.modules.pop('dumpcache', None) - try: - sys.path.insert(0, str(tmpdir)) - execfile(str(filepath), {}) - finally: - sys.path[:] = path - sys.modules.pop('dumpcache', None) - # - outputpath = tmpdir.join(outputname) - assert outputpath.check(exists=1) - modname = os.path.splitext(outputname)[0] - try: 
- sys.path.insert(0, str(tmpdir)) - d = {} - execfile(str(outputpath), d) - finally: - sys.path[:] = path - return d - - -def test_resource(): - if sys.platform == 'win32': - py.test.skip('no resource module on this platform') - d = run('resource.ctc.py', '_resource_cache.py') - assert 'RLIM_NLIMITS' in d - -def test_locale(): - d = run('locale.ctc.py', '_locale_cache.py') - assert 'LC_ALL' in d - assert 'CHAR_MAX' in d diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -1,52 +1,36 @@ -# Generates the resource cache (it might be there already, but maybe not) +# Assumes that _resource_cffi is there already from __future__ import absolute_import import os +import py +from pypy.module.test_lib_pypy import test_resource # side-effect: skip() -import py -from lib_pypy.ctypes_config_cache import rebuild -from pypy.module.test_lib_pypy.support import import_lib_pypy +from lib_pypy import _pypy_wait +def test_os_wait3(): + wait3 = _pypy_wait.wait3 + exit_status = 0x33 + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait3(0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) -class AppTestOsWait: - - spaceconfig = dict(usemodules=('_rawffi', 'fcntl', 'itertools', 'select', - 'signal', '_posixsubprocess')) - - def setup_class(cls): - if not hasattr(os, "fork"): - py.test.skip("Need fork() to test wait3/wait4()") - rebuild.rebuild_one('resource.ctc.py') - cls.space.appexec([], "(): import ctypes") - cls.w__pypy_wait = import_lib_pypy( - cls.space, '_pypy_wait') - - def test_os_wait3(self): - import os - wait3 = self._pypy_wait.wait3 - exit_status = 0x33 - child = os.fork() - if child == 0: # in child - os._exit(exit_status) - else: - pid, 
status, rusage = wait3(0) - assert child == pid - assert os.WIFEXITED(status) - assert os.WEXITSTATUS(status) == exit_status - assert isinstance(rusage.ru_utime, float) - assert isinstance(rusage.ru_maxrss, int) - - def test_os_wait4(self): - import os - wait4 = self._pypy_wait.wait4 - exit_status = 0x33 - child = os.fork() - if child == 0: # in child - os._exit(exit_status) - else: - pid, status, rusage = wait4(child, 0) - assert child == pid - assert os.WIFEXITED(status) - assert os.WEXITSTATUS(status) == exit_status - assert isinstance(rusage.ru_utime, float) - assert isinstance(rusage.ru_maxrss, int) +def test_os_wait4(): + wait4 = _pypy_wait.wait4 + exit_status = 0x33 + child = os.fork() + if child == 0: # in child + os._exit(exit_status) + else: + pid, status, rusage = wait4(child, 0) + assert child == pid + assert os.WIFEXITED(status) + assert os.WEXITSTATUS(status) == exit_status + assert isinstance(rusage.ru_utime, float) + assert isinstance(rusage.ru_maxrss, int) diff --git a/pypy/module/test_lib_pypy/test_resource.py b/pypy/module/test_lib_pypy/test_resource.py --- a/pypy/module/test_lib_pypy/test_resource.py +++ b/pypy/module/test_lib_pypy/test_resource.py @@ -1,45 +1,49 @@ from __future__ import absolute_import -from lib_pypy.ctypes_config_cache import rebuild -from pypy.module.test_lib_pypy.support import import_lib_pypy - import os if os.name != 'posix': skip('resource.h only available on unix') -class AppTestResource: +try: + from lib_pypy import resource +except ImportError as e: + skip(str(e)) - spaceconfig = dict(usemodules=('_rawffi', 'fcntl', 'itertools', 'select', - 'signal')) - def setup_class(cls): - rebuild.rebuild_one('resource.ctc.py') - cls.w_resource = import_lib_pypy(cls.space, 'resource', - 'No resource module available') +def test_getrusage(): + x = resource.getrusage(resource.RUSAGE_SELF) + assert len(x) == 16 + assert x[0] == x[-16] == x.ru_utime + assert x[1] == x[-15] == x.ru_stime + assert x[2] == x[-14] == x.ru_maxrss + assert 
x[3] == x[-13] == x.ru_ixrss + assert x[4] == x[-12] == x.ru_idrss + assert x[5] == x[-11] == x.ru_isrss + assert x[6] == x[-10] == x.ru_minflt + assert x[7] == x[-9] == x.ru_majflt + assert x[8] == x[-8] == x.ru_nswap + assert x[9] == x[-7] == x.ru_inblock + assert x[10] == x[-6] == x.ru_oublock + assert x[11] == x[-5] == x.ru_msgsnd + assert x[12] == x[-4] == x.ru_msgrcv + assert x[13] == x[-3] == x.ru_nsignals + assert x[14] == x[-2] == x.ru_nvcsw + assert x[15] == x[-1] == x.ru_nivcsw + for i in range(16): + if i < 2: + expected_type = float + else: + expected_type = int + assert isinstance(x[i], expected_type) - def test_resource(self): - resource = self.resource - x = resource.getrusage(resource.RUSAGE_SELF) - assert len(x) == 16 - assert x[0] == x[-16] == x.ru_utime - assert x[1] == x[-15] == x.ru_stime - assert x[2] == x[-14] == x.ru_maxrss - assert x[3] == x[-13] == x.ru_ixrss - assert x[4] == x[-12] == x.ru_idrss - assert x[5] == x[-11] == x.ru_isrss - assert x[6] == x[-10] == x.ru_minflt - assert x[7] == x[-9] == x.ru_majflt - assert x[8] == x[-8] == x.ru_nswap - assert x[9] == x[-7] == x.ru_inblock - assert x[10] == x[-6] == x.ru_oublock - assert x[11] == x[-5] == x.ru_msgsnd - assert x[12] == x[-4] == x.ru_msgrcv - assert x[13] == x[-3] == x.ru_nsignals - assert x[14] == x[-2] == x.ru_nvcsw - assert x[15] == x[-1] == x.ru_nivcsw - for i in range(16): - if i < 2: - expected_type = float - else: - expected_type = int - assert isinstance(x[i], expected_type) +def test_getrlimit(): + x = resource.getrlimit(resource.RLIMIT_CPU) + assert isinstance(x, tuple) + assert len(x) == 2 + assert isinstance(x[0], int) + assert isinstance(x[1], int) + +def test_setrlimit(): + # minimal "does not crash" test + x = resource.getrlimit(resource.RLIMIT_CPU) + resource.setrlimit(resource.RLIMIT_CPU, x) diff --git a/pypy/tool/build_cffi_imports.py b/pypy/tool/build_cffi_imports.py --- a/pypy/tool/build_cffi_imports.py +++ b/pypy/tool/build_cffi_imports.py @@ -13,6 +13,7 @@ 
"syslog": "_syslog_build.py" if sys.platform != "win32" else None, "_gdbm": "_gdbm_build.py" if sys.platform != "win32" else None, "pwdgrp": "_pwdgrp_build.py" if sys.platform != "win32" else None, + "resource": "_resource_build.py" if sys.platform != "win32" else None, "lzma": "_lzma_build.py", "_decimal": "_decimal_build.py", "xx": None, # for testing: 'None' should be completely ignored diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -172,6 +172,7 @@ # Careful: to copy lib_pypy, copying just the hg-tracked files # would not be enough: there are also ctypes_config_cache/_*_cache.py. + # XXX ^^^ this is no longer true! shutil.copytree(str(basedir.join('lib-python').join(STDLIB_VER)), str(pypydir.join('lib-python').join(STDLIB_VER)), ignore=ignore_patterns('.svn', 'py', '*.pyc', '*~')) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -182,9 +182,9 @@ """ self._check_type(origvar) prev_loc = self.loc(origvar, must_exist=must_exist) - var2 = TempVar() + var2 = TempInt() if bindvar is None: - bindvar = TempVar() + bindvar = TempInt() if bind_first: loc, loc2 = self.force_allocate_reg_pair(bindvar, var2, self.temp_boxes) else: @@ -310,20 +310,22 @@ # uff! 
in this case, we need to move a forbidden var to another register assert len(forbidden_vars) <= 8 # otherwise it is NOT possible to complete even, odd = r.r2, r.r3 - even_var = reverse_mapping.get(even, None) - odd_var = reverse_mapping.get(odd, None) - if even_var: - if even_var in forbidden_vars: - self._relocate_forbidden_variable(even, even_var, reverse_mapping, + old_even_var = reverse_mapping.get(even, None) + old_odd_var = reverse_mapping.get(odd, None) + if old_even_var: + if old_even_var in forbidden_vars: + self._relocate_forbidden_variable(even, old_even_var, reverse_mapping, forbidden_vars, odd) else: - self._sync_var(even_var) - if odd_var: - if odd_var in forbidden_vars: - self._relocate_forbidden_variable(odd, odd_var, reverse_mapping, + self._sync_var(old_even_var) + del self.reg_bindings[old_even_var] + if old_odd_var: + if old_odd_var in forbidden_vars: + self._relocate_forbidden_variable(odd, old_odd_var, reverse_mapping, forbidden_vars, even) else: - self._sync_var(odd_var) + self._sync_var(old_odd_var) + del self.reg_bindings[old_odd_var] self.free_regs = [fr for fr in self.free_regs \ if fr is not even and \ @@ -335,6 +337,12 @@ return even, odd def _relocate_forbidden_variable(self, reg, var, reverse_mapping, forbidden_vars, forbidden_reg): + if len(self.free_regs) > 0: + candidate = self.free_regs.pop() + self.assembler.regalloc_mov(reg, candidate) + self.reg_bindings[var] = candidate + reverse_mapping[candidate] = var + for candidate in r.MANAGED_REGS: # move register of var to another register # thus it is not allowed to bei either reg or forbidden_reg @@ -345,9 +353,11 @@ if not candidate_var or candidate_var not in forbidden_vars: if candidate_var is not None: self._sync_var(candidate_var) + del self.reg_bindings[candidate_var] self.assembler.regalloc_mov(reg, candidate) + assert var is not None self.reg_bindings[var] = candidate - reverse_mapping[reg] = var + reverse_mapping[candidate] = var self.free_regs.append(reg) break else: diff 
--git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py --- a/rpython/rlib/rposix_stat.py +++ b/rpython/rlib/rposix_stat.py @@ -94,7 +94,8 @@ return self.__class__, def getattr(self, s_attr): - assert s_attr.is_constant(), "non-constant attr name in getattr()" + if not s_attr.is_constant(): + raise annmodel.AnnotatorError("non-constant attr name in getattr()") attrname = s_attr.const TYPE = STAT_FIELD_TYPES[attrname] return lltype_to_annotation(TYPE) diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -287,7 +287,12 @@ wr_from_thread.seen = False start_new_thread(thread_entry_point, ()) wr1 = f() - time.sleep(0.5) + count = 0 + while True: + time.sleep(0.5) + if wr_from_thread.seen or count >= 50: + break + count += 1 assert wr_from_thread.seen is True wr2 = wr_from_thread.wr import gc; gc.collect() # wr2() should be collected here From pypy.commits at gmail.com Wed Apr 27 17:38:57 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 27 Apr 2016 14:38:57 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: merge e35996d1c1b6 Message-ID: <57213171.d1981c0a.4053e.79e8@mx.google.com> Author: Carl Friedrich Bolz Branch: py3k-update Changeset: r83996:72878888045c Date: 2016-04-28 00:38 +0300 http://bitbucket.org/pypy/pypy/changeset/72878888045c/ Log: merge e35996d1c1b6 diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -53,24 +53,24 @@ n = self.root_node if n.type == syms.file_input: stmts = [] - for i in range(len(n.children) - 1): - stmt = n.children[i] + for i in range(n.num_children() - 1): + stmt = n.get_child(i) if stmt.type == tokens.NEWLINE: continue sub_stmts_count = self.number_of_statements(stmt) if sub_stmts_count == 1: stmts.append(self.handle_stmt(stmt)) else: - stmt = 
stmt.children[0] + stmt = stmt.get_child(0) for j in range(sub_stmts_count): - small_stmt = stmt.children[j * 2] + small_stmt = stmt.get_child(j * 2) stmts.append(self.handle_stmt(small_stmt)) return ast.Module(stmts) elif n.type == syms.eval_input: - body = self.handle_testlist(n.children[0]) + body = self.handle_testlist(n.get_child(0)) return ast.Expression(body) elif n.type == syms.single_input: - first_child = n.children[0] + first_child = n.get_child(0) if first_child.type == tokens.NEWLINE: # An empty line. return ast.Interactive([]) @@ -80,8 +80,8 @@ stmts = [self.handle_stmt(first_child)] else: stmts = [] - for i in range(0, len(first_child.children), 2): - stmt = first_child.children[i] + for i in range(0, first_child.num_children(), 2): + stmt = first_child.get_child(i) if stmt.type == tokens.NEWLINE: break stmts.append(self.handle_stmt(stmt)) @@ -95,16 +95,16 @@ if stmt_type == syms.compound_stmt: return 1 elif stmt_type == syms.stmt: - return self.number_of_statements(n.children[0]) + return self.number_of_statements(n.get_child(0)) elif stmt_type == syms.simple_stmt: # Divide to remove semi-colons. 
- return len(n.children) // 2 + return n.num_children() // 2 else: raise AssertionError("non-statement node") def error(self, msg, n): """Raise a SyntaxError with the lineno and column set to n's.""" - raise SyntaxError(msg, n.lineno, n.column, + raise SyntaxError(msg, n.get_lineno(), n.get_column(), filename=self.compile_info.filename) def error_ast(self, msg, ast_node): @@ -130,34 +130,34 @@ self.error_ast("cannot assign to %s" % (e.name,), e.node) def handle_del_stmt(self, del_node): - targets = self.handle_exprlist(del_node.children[1], ast.Del) - return ast.Delete(targets, del_node.lineno, del_node.column) + targets = self.handle_exprlist(del_node.get_child(1), ast.Del) + return ast.Delete(targets, del_node.get_lineno(), del_node.get_column()) def handle_flow_stmt(self, flow_node): - first_child = flow_node.children[0] + first_child = flow_node.get_child(0) first_child_type = first_child.type if first_child_type == syms.break_stmt: - return ast.Break(flow_node.lineno, flow_node.column) + return ast.Break(flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.continue_stmt: - return ast.Continue(flow_node.lineno, flow_node.column) + return ast.Continue(flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.yield_stmt: - yield_expr = self.handle_expr(first_child.children[0]) - return ast.Expr(yield_expr, flow_node.lineno, flow_node.column) + yield_expr = self.handle_expr(first_child.get_child(0)) + return ast.Expr(yield_expr, flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.return_stmt: - if len(first_child.children) == 1: + if first_child.num_children() == 1: values = None else: - values = self.handle_testlist(first_child.children[1]) - return ast.Return(values, flow_node.lineno, flow_node.column) + values = self.handle_testlist(first_child.get_child(1)) + return ast.Return(values, flow_node.get_lineno(), flow_node.get_column()) elif first_child_type == syms.raise_stmt: exc = None cause 
= None - child_count = len(first_child.children) + child_count = first_child.num_children() if child_count >= 2: - exc = self.handle_expr(first_child.children[1]) + exc = self.handle_expr(first_child.get_child(1)) if child_count >= 4: - cause = self.handle_expr(first_child.children[3]) - return ast.Raise(exc, cause, flow_node.lineno, flow_node.column) + cause = self.handle_expr(first_child.get_child(3)) + return ast.Raise(exc, cause, flow_node.get_lineno(), flow_node.get_column()) else: raise AssertionError("unknown flow statement") @@ -165,33 +165,33 @@ while True: import_name_type = import_name.type if import_name_type == syms.import_as_name: - name = self.new_identifier(import_name.children[0].value) - if len(import_name.children) == 3: + name = self.new_identifier(import_name.get_child(0).get_value()) + if import_name.num_children() == 3: as_name = self.new_identifier( - import_name.children[2].value) - self.check_forbidden_name(as_name, import_name.children[2]) + import_name.get_child(2).get_value()) + self.check_forbidden_name(as_name, import_name.get_child(2)) else: as_name = None - self.check_forbidden_name(name, import_name.children[0]) + self.check_forbidden_name(name, import_name.get_child(0)) return ast.alias(name, as_name) elif import_name_type == syms.dotted_as_name: - if len(import_name.children) == 1: - import_name = import_name.children[0] + if import_name.num_children() == 1: + import_name = import_name.get_child(0) continue - alias = self.alias_for_import_name(import_name.children[0], + alias = self.alias_for_import_name(import_name.get_child(0), store=False) - asname_node = import_name.children[2] - alias.asname = self.new_identifier(asname_node.value) + asname_node = import_name.get_child(2) + alias.asname = self.new_identifier(asname_node.get_value()) self.check_forbidden_name(alias.asname, asname_node) return alias elif import_name_type == syms.dotted_name: - if len(import_name.children) == 1: - name = 
self.new_identifier(import_name.children[0].value) + if import_name.num_children() == 1: + name = self.new_identifier(import_name.get_child(0).get_value()) if store: - self.check_forbidden_name(name, import_name.children[0]) + self.check_forbidden_name(name, import_name.get_child(0)) return ast.alias(name, None) - name_parts = [import_name.children[i].value - for i in range(0, len(import_name.children), 2)] + name_parts = [import_name.get_child(i).get_value() + for i in range(0, import_name.num_children(), 2)] name = ".".join(name_parts) return ast.alias(name, None) elif import_name_type == tokens.STAR: @@ -200,20 +200,20 @@ raise AssertionError("unknown import name") def handle_import_stmt(self, import_node): - import_node = import_node.children[0] + import_node = import_node.get_child(0) if import_node.type == syms.import_name: - dotted_as_names = import_node.children[1] - aliases = [self.alias_for_import_name(dotted_as_names.children[i]) - for i in range(0, len(dotted_as_names.children), 2)] - return ast.Import(aliases, import_node.lineno, import_node.column) + dotted_as_names = import_node.get_child(1) + aliases = [self.alias_for_import_name(dotted_as_names.get_child(i)) + for i in range(0, dotted_as_names.num_children(), 2)] + return ast.Import(aliases, import_node.get_lineno(), import_node.get_column()) elif import_node.type == syms.import_from: - child_count = len(import_node.children) + child_count = import_node.num_children() module = None modname = None i = 1 dot_count = 0 while i < child_count: - child = import_node.children[i] + child = import_node.get_child(i) child_type = child.type if child_type == syms.dotted_name: module = self.alias_for_import_name(child, False) @@ -227,16 +227,16 @@ i += 1 dot_count += 1 i += 1 - after_import_type = import_node.children[i].type + after_import_type = import_node.get_child(i).type star_import = False if after_import_type == tokens.STAR: - names_node = import_node.children[i] + names_node = import_node.get_child(i) 
star_import = True elif after_import_type == tokens.LPAR: - names_node = import_node.children[i + 1] + names_node = import_node.get_child(i + 1) elif after_import_type == syms.import_as_names: - names_node = import_node.children[i] - if len(names_node.children) % 2 == 0: + names_node = import_node.get_child(i) + if names_node.num_children() % 2 == 0: self.error("trailing comma is only allowed with " "surronding parenthesis", names_node) else: @@ -244,307 +244,308 @@ if star_import: aliases = [self.alias_for_import_name(names_node)] else: - aliases = [self.alias_for_import_name(names_node.children[i]) - for i in range(0, len(names_node.children), 2)] + aliases = [self.alias_for_import_name(names_node.get_child(i)) + for i in range(0, names_node.num_children(), 2)] if module is not None: modname = module.name return ast.ImportFrom(modname, aliases, dot_count, - import_node.lineno, import_node.column) + import_node.get_lineno(), import_node.get_column()) else: raise AssertionError("unknown import node") def handle_global_stmt(self, global_node): - names = [self.new_identifier(global_node.children[i].value) - for i in range(1, len(global_node.children), 2)] - return ast.Global(names, global_node.lineno, global_node.column) + names = [self.new_identifier(global_node.get_child(i).get_value()) + for i in range(1, global_node.num_children(), 2)] + return ast.Global(names, global_node.get_lineno(), global_node.get_column()) def handle_nonlocal_stmt(self, nonlocal_node): - names = [self.new_identifier(nonlocal_node.children[i].value) - for i in range(1, len(nonlocal_node.children), 2)] - return ast.Nonlocal(names, nonlocal_node.lineno, nonlocal_node.column) + names = [self.new_identifier(nonlocal_node.get_child(i).get_value()) + for i in range(1, nonlocal_node.num_children(), 2)] + return ast.Nonlocal(names, nonlocal_node.get_lineno(), nonlocal_node.get_column()) def handle_assert_stmt(self, assert_node): - expr = self.handle_expr(assert_node.children[1]) + expr = 
self.handle_expr(assert_node.get_child(1)) msg = None - if len(assert_node.children) == 4: - msg = self.handle_expr(assert_node.children[3]) - return ast.Assert(expr, msg, assert_node.lineno, assert_node.column) + if assert_node.num_children() == 4: + msg = self.handle_expr(assert_node.get_child(3)) + return ast.Assert(expr, msg, assert_node.get_lineno(), assert_node.get_column()) def handle_suite(self, suite_node): - first_child = suite_node.children[0] + first_child = suite_node.get_child(0) if first_child.type == syms.simple_stmt: - end = len(first_child.children) - 1 - if first_child.children[end - 1].type == tokens.SEMI: + end = first_child.num_children() - 1 + if first_child.get_child(end - 1).type == tokens.SEMI: end -= 1 - stmts = [self.handle_stmt(first_child.children[i]) + stmts = [self.handle_stmt(first_child.get_child(i)) for i in range(0, end, 2)] else: stmts = [] - for i in range(2, len(suite_node.children) - 1): - stmt = suite_node.children[i] + for i in range(2, suite_node.num_children() - 1): + stmt = suite_node.get_child(i) stmt_count = self.number_of_statements(stmt) if stmt_count == 1: stmts.append(self.handle_stmt(stmt)) else: - simple_stmt = stmt.children[0] - for j in range(0, len(simple_stmt.children), 2): - stmt = simple_stmt.children[j] - if not stmt.children: + simple_stmt = stmt.get_child(0) + for j in range(0, simple_stmt.num_children(), 2): + stmt = simple_stmt.get_child(j) + if not stmt.num_children(): break stmts.append(self.handle_stmt(stmt)) return stmts def handle_if_stmt(self, if_node): - child_count = len(if_node.children) + child_count = if_node.num_children() if child_count == 4: - test = self.handle_expr(if_node.children[1]) - suite = self.handle_suite(if_node.children[3]) - return ast.If(test, suite, None, if_node.lineno, if_node.column) - otherwise_string = if_node.children[4].value + test = self.handle_expr(if_node.get_child(1)) + suite = self.handle_suite(if_node.get_child(3)) + return ast.If(test, suite, None, 
if_node.get_lineno(), if_node.get_column()) + otherwise_string = if_node.get_child(4).get_value() if otherwise_string == "else": - test = self.handle_expr(if_node.children[1]) - suite = self.handle_suite(if_node.children[3]) - else_suite = self.handle_suite(if_node.children[6]) - return ast.If(test, suite, else_suite, if_node.lineno, - if_node.column) + test = self.handle_expr(if_node.get_child(1)) + suite = self.handle_suite(if_node.get_child(3)) + else_suite = self.handle_suite(if_node.get_child(6)) + return ast.If(test, suite, else_suite, if_node.get_lineno(), + if_node.get_column()) elif otherwise_string == "elif": elif_count = child_count - 4 - after_elif = if_node.children[elif_count + 1] + after_elif = if_node.get_child(elif_count + 1) if after_elif.type == tokens.NAME and \ - after_elif.value == "else": + after_elif.get_value() == "else": has_else = True elif_count -= 3 else: has_else = False elif_count /= 4 if has_else: - last_elif = if_node.children[-6] + last_elif = if_node.get_child(-6) last_elif_test = self.handle_expr(last_elif) - elif_body = self.handle_suite(if_node.children[-4]) - else_body = self.handle_suite(if_node.children[-1]) + elif_body = self.handle_suite(if_node.get_child(-4)) + else_body = self.handle_suite(if_node.get_child(-1)) otherwise = [ast.If(last_elif_test, elif_body, else_body, - last_elif.lineno, last_elif.column)] + last_elif.get_lineno(), last_elif.get_column())] elif_count -= 1 else: otherwise = None for i in range(elif_count): offset = 5 + (elif_count - i - 1) * 4 - elif_test_node = if_node.children[offset] + elif_test_node = if_node.get_child(offset) elif_test = self.handle_expr(elif_test_node) - elif_body = self.handle_suite(if_node.children[offset + 2]) + elif_body = self.handle_suite(if_node.get_child(offset + 2)) new_if = ast.If(elif_test, elif_body, otherwise, - elif_test_node.lineno, elif_test_node.column) + elif_test_node.get_lineno(), elif_test_node.get_column()) otherwise = [new_if] - expr = 
self.handle_expr(if_node.children[1]) - body = self.handle_suite(if_node.children[3]) - return ast.If(expr, body, otherwise, if_node.lineno, if_node.column) + expr = self.handle_expr(if_node.get_child(1)) + body = self.handle_suite(if_node.get_child(3)) + return ast.If(expr, body, otherwise, if_node.get_lineno(), if_node.get_column()) else: raise AssertionError("unknown if statement configuration") def handle_while_stmt(self, while_node): - loop_test = self.handle_expr(while_node.children[1]) - body = self.handle_suite(while_node.children[3]) - if len(while_node.children) == 7: - otherwise = self.handle_suite(while_node.children[6]) + loop_test = self.handle_expr(while_node.get_child(1)) + body = self.handle_suite(while_node.get_child(3)) + if while_node.num_children() == 7: + otherwise = self.handle_suite(while_node.get_child(6)) else: otherwise = None - return ast.While(loop_test, body, otherwise, while_node.lineno, - while_node.column) + return ast.While(loop_test, body, otherwise, while_node.get_lineno(), + while_node.get_column()) def handle_for_stmt(self, for_node): - target_node = for_node.children[1] + target_node = for_node.get_child(1) target_as_exprlist = self.handle_exprlist(target_node, ast.Store) - if len(target_node.children) == 1: + if target_node.num_children() == 1: target = target_as_exprlist[0] else: target = ast.Tuple(target_as_exprlist, ast.Store, - target_node.lineno, target_node.column) - expr = self.handle_testlist(for_node.children[3]) - body = self.handle_suite(for_node.children[5]) - if len(for_node.children) == 9: - otherwise = self.handle_suite(for_node.children[8]) + target_node.get_lineno(), target_node.get_column()) + expr = self.handle_testlist(for_node.get_child(3)) + body = self.handle_suite(for_node.get_child(5)) + if for_node.num_children() == 9: + otherwise = self.handle_suite(for_node.get_child(8)) else: otherwise = None - return ast.For(target, expr, body, otherwise, for_node.lineno, - for_node.column) + return 
ast.For(target, expr, body, otherwise, for_node.get_lineno(), + for_node.get_column()) def handle_except_clause(self, exc, body): test = None name = None suite = self.handle_suite(body) - child_count = len(exc.children) + child_count = exc.num_children() if child_count >= 2: - test = self.handle_expr(exc.children[1]) + test = self.handle_expr(exc.get_child(1)) if child_count == 4: - name_node = exc.children[3] - name = self.new_identifier(name_node.value) + name_node = exc.get_child(3) + name = self.new_identifier(name_node.get_value()) self.check_forbidden_name(name, name_node) - return ast.ExceptHandler(test, name, suite, exc.lineno, exc.column) + return ast.ExceptHandler(test, name, suite, exc.get_lineno(), exc.get_column()) def handle_try_stmt(self, try_node): - body = self.handle_suite(try_node.children[2]) - child_count = len(try_node.children) + body = self.handle_suite(try_node.get_child(2)) + child_count = try_node.num_children() except_count = (child_count - 3 ) // 3 otherwise = None finally_suite = None - possible_extra_clause = try_node.children[-3] + possible_extra_clause = try_node.get_child(-3) if possible_extra_clause.type == tokens.NAME: - if possible_extra_clause.value == "finally": + if possible_extra_clause.get_value() == "finally": if child_count >= 9 and \ - try_node.children[-6].type == tokens.NAME: - otherwise = self.handle_suite(try_node.children[-4]) + try_node.get_child(-6).type == tokens.NAME: + otherwise = self.handle_suite(try_node.get_child(-4)) except_count -= 1 - finally_suite = self.handle_suite(try_node.children[-1]) + finally_suite = self.handle_suite(try_node.get_child(-1)) except_count -= 1 else: - otherwise = self.handle_suite(try_node.children[-1]) + otherwise = self.handle_suite(try_node.get_child(-1)) except_count -= 1 handlers = [] if except_count: for i in range(except_count): base_offset = i * 3 - exc = try_node.children[3 + base_offset] - except_body = try_node.children[5 + base_offset] + exc = try_node.get_child(3 + 
base_offset) + except_body = try_node.get_child(5 + base_offset) handlers.append(self.handle_except_clause(exc, except_body)) return ast.Try(body, handlers, otherwise, finally_suite, - try_node.lineno, try_node.column) + try_node.get_lineno(), try_node.get_column()) def handle_with_stmt(self, with_node): - body = self.handle_suite(with_node.children[-1]) - i = len(with_node.children) - 1 + body = self.handle_suite(with_node.get_child(-1)) + i = with_node.num_children() - 1 while True: i -= 2 - item = with_node.children[i] - test = self.handle_expr(item.children[0]) - if len(item.children) == 3: - target = self.handle_expr(item.children[2]) + item = with_node.get_child(i) + test = self.handle_expr(item.get_child(0)) + if item.num_children() == 3: + target = self.handle_expr(item.get_child(2)) self.set_context(target, ast.Store) else: target = None - wi = ast.With(test, target, body, with_node.lineno, - with_node.column) + wi = ast.With(test, target, body, with_node.get_lineno(), + with_node.get_column()) if i == 1: break body = [wi] return wi def handle_with_item(self, item_node): - test = self.handle_expr(item_node.children[0]) - if len(item_node.children) == 3: - target = self.handle_expr(item_node.children[2]) + test = self.handle_expr(item_node.get_child(0)) + if item_node.num_children() == 3: + target = self.handle_expr(item_node.get_child(2)) self.set_context(target, ast.Store) else: target = None return ast.withitem(test, target) def handle_with_stmt(self, with_node): - body = self.handle_suite(with_node.children[-1]) - items = [self.handle_with_item(with_node.children[i]) - for i in range(1, len(with_node.children)-2, 2)] - return ast.With(items, body, with_node.lineno, with_node.column) + body = self.handle_suite(with_node.get_child(-1)) + items = [self.handle_with_item(with_node.get_child(i)) + for i in range(1, with_node.num_children()-2, 2)] + return ast.With(items, body, with_node.get_lineno(), with_node.get_column()) def handle_classdef(self, 
classdef_node, decorators=None): - name_node = classdef_node.children[1] - name = self.new_identifier(name_node.value) + name_node = classdef_node.get_child(1) + name = self.new_identifier(name_node.get_value()) self.check_forbidden_name(name, name_node) - if len(classdef_node.children) == 4: + if classdef_node.num_children() == 4: # class NAME ':' suite - body = self.handle_suite(classdef_node.children[3]) + body = self.handle_suite(classdef_node.get_child(3)) return ast.ClassDef(name, None, None, None, None, body, decorators, - classdef_node.lineno, classdef_node.column) - if classdef_node.children[3].type == tokens.RPAR: + classdef_node.get_lineno(), classdef_node.get_column()) + if classdef_node.get_child(3).type == tokens.RPAR: # class NAME '(' ')' ':' suite - body = self.handle_suite(classdef_node.children[5]) + body = self.handle_suite(classdef_node.get_child(5)) return ast.ClassDef(name, None, None, None, None, body, decorators, - classdef_node.lineno, classdef_node.column) + classdef_node.get_lineno(), classdef_node.get_column()) # class NAME '(' arglist ')' ':' suite # build up a fake Call node so we can extract its pieces - call_name = ast.Name(name, ast.Load, classdef_node.lineno, - classdef_node.column) - call = self.handle_call(classdef_node.children[3], call_name) - body = self.handle_suite(classdef_node.children[6]) + call_name = ast.Name(name, ast.Load, classdef_node.get_lineno(), + classdef_node.get_column()) + call = self.handle_call(classdef_node.get_child(3), call_name) + body = self.handle_suite(classdef_node.get_child(6)) return ast.ClassDef( name, call.args, call.keywords, call.starargs, call.kwargs, - body, decorators, classdef_node.lineno, classdef_node.column) + body, decorators, classdef_node.get_lineno(), classdef_node.get_column()) def handle_class_bases(self, bases_node): - if len(bases_node.children) == 1: - return [self.handle_expr(bases_node.children[0])] + if bases_node.num_children() == 1: + return 
[self.handle_expr(bases_node.get_child(0))] return self.get_expression_list(bases_node) def handle_funcdef(self, funcdef_node, decorators=None): - name_node = funcdef_node.children[1] - name = self.new_identifier(name_node.value) + name_node = funcdef_node.get_child(1) + name = self.new_identifier(name_node.get_value()) self.check_forbidden_name(name, name_node) - args = self.handle_arguments(funcdef_node.children[2]) + args = self.handle_arguments(funcdef_node.get_child(2)) suite = 4 returns = None - if funcdef_node.children[3].type == tokens.RARROW: - returns = self.handle_expr(funcdef_node.children[4]) + if funcdef_node.get_child(3).type == tokens.RARROW: + returns = self.handle_expr(funcdef_node.get_child(4)) suite += 2 - body = self.handle_suite(funcdef_node.children[suite]) + body = self.handle_suite(funcdef_node.get_child(suite)) return ast.FunctionDef(name, args, body, decorators, returns, - funcdef_node.lineno, funcdef_node.column) + funcdef_node.get_lineno(), funcdef_node.get_column()) def handle_decorated(self, decorated_node): - decorators = self.handle_decorators(decorated_node.children[0]) - definition = decorated_node.children[1] + decorators = self.handle_decorators(decorated_node.get_child(0)) + definition = decorated_node.get_child(1) if definition.type == syms.funcdef: node = self.handle_funcdef(definition, decorators) elif definition.type == syms.classdef: node = self.handle_classdef(definition, decorators) else: raise AssertionError("unkown decorated") - node.lineno = decorated_node.lineno - node.col_offset = decorated_node.column + node.lineno = decorated_node.get_lineno() + node.col_offset = decorated_node.get_column() return node def handle_decorators(self, decorators_node): - return [self.handle_decorator(dec) for dec in decorators_node.children] + return [self.handle_decorator(decorators_node.get_child(i)) + for i in range(decorators_node.num_children())] def handle_decorator(self, decorator_node): - dec_name = 
self.handle_dotted_name(decorator_node.children[1]) - if len(decorator_node.children) == 3: + dec_name = self.handle_dotted_name(decorator_node.get_child(1)) + if decorator_node.num_children() == 3: dec = dec_name - elif len(decorator_node.children) == 5: + elif decorator_node.num_children() == 5: dec = ast.Call(dec_name, None, None, None, None, - decorator_node.lineno, decorator_node.column) + decorator_node.get_lineno(), decorator_node.get_column()) else: - dec = self.handle_call(decorator_node.children[3], dec_name) + dec = self.handle_call(decorator_node.get_child(3), dec_name) return dec def handle_dotted_name(self, dotted_name_node): - base_value = self.new_identifier(dotted_name_node.children[0].value) - name = ast.Name(base_value, ast.Load, dotted_name_node.lineno, - dotted_name_node.column) - for i in range(2, len(dotted_name_node.children), 2): - attr = dotted_name_node.children[i].value + base_value = self.new_identifier(dotted_name_node.get_child(0).get_value()) + name = ast.Name(base_value, ast.Load, dotted_name_node.get_lineno(), + dotted_name_node.get_column()) + for i in range(2, dotted_name_node.num_children(), 2): + attr = dotted_name_node.get_child(i).get_value() attr = self.new_identifier(attr) - name = ast.Attribute(name, attr, ast.Load, dotted_name_node.lineno, - dotted_name_node.column) + name = ast.Attribute(name, attr, ast.Load, dotted_name_node.get_lineno(), + dotted_name_node.get_column()) return name def handle_arguments(self, arguments_node): # This function handles both typedargslist (function definition) # and varargslist (lambda definition). 
if arguments_node.type == syms.parameters: - if len(arguments_node.children) == 2: + if arguments_node.num_children() == 2: return ast.arguments(None, None, None, None, None, None, None, None) - arguments_node = arguments_node.children[1] + arguments_node = arguments_node.get_child(1) i = 0 - child_count = len(arguments_node.children) + child_count = arguments_node.num_children() n_pos = 0 n_pos_def = 0 n_kwdonly = 0 # scan args while i < child_count: - arg_type = arguments_node.children[i].type + arg_type = arguments_node.get_child(i).type if arg_type == tokens.STAR: i += 1 if i < child_count: - next_arg_type = arguments_node.children[i].type + next_arg_type = arguments_node.get_child(i).type if (next_arg_type == syms.tfpdef or next_arg_type == syms.vfpdef): i += 1 @@ -557,7 +558,7 @@ n_pos_def += 1 i += 1 while i < child_count: - arg_type = arguments_node.children[i].type + arg_type = arguments_node.get_child(i).type if arg_type == tokens.DOUBLESTAR: break if arg_type == syms.vfpdef or arg_type == syms.tfpdef: @@ -577,12 +578,12 @@ i = 0 have_default = False while i < child_count: - arg = arguments_node.children[i] + arg = arguments_node.get_child(i) arg_type = arg.type if arg_type == syms.tfpdef or arg_type == syms.vfpdef: if i + 1 < child_count and \ - arguments_node.children[i + 1].type == tokens.EQUAL: - default_node = arguments_node.children[i + 2] + arguments_node.get_child(i + 1).type == tokens.EQUAL: + default_node = arguments_node.get_child(i + 2) posdefaults.append(self.handle_expr(default_node)) i += 2 have_default = True @@ -595,32 +596,32 @@ if i + 1 >= child_count: self.error("named arguments must follow bare *", arguments_node) - name_node = arguments_node.children[i + 1] + name_node = arguments_node.get_child(i + 1) keywordonly_args = [] if name_node.type == tokens.COMMA: i += 2 i = self.handle_keywordonly_args(arguments_node, i, kwonly, kwdefaults) else: - vararg = name_node.children[0].value + vararg = name_node.get_child(0).get_value() vararg = 
self.new_identifier(vararg) self.check_forbidden_name(vararg, name_node) - if len(name_node.children) > 1: - varargann = self.handle_expr(name_node.children[2]) + if name_node.num_children() > 1: + varargann = self.handle_expr(name_node.get_child(2)) i += 3 if i < child_count: - next_arg_type = arguments_node.children[i].type + next_arg_type = arguments_node.get_child(i).type if (next_arg_type == syms.tfpdef or next_arg_type == syms.vfpdef): i = self.handle_keywordonly_args(arguments_node, i, kwonly, kwdefaults) elif arg_type == tokens.DOUBLESTAR: - name_node = arguments_node.children[i + 1] - kwarg = name_node.children[0].value + name_node = arguments_node.get_child(i + 1) + kwarg = name_node.get_child(0).get_value() kwarg = self.new_identifier(kwarg) self.check_forbidden_name(kwarg, name_node) - if len(name_node.children) > 1: - kwargann = self.handle_expr(name_node.children[2]) + if name_node.num_children() > 1: + kwargann = self.handle_expr(name_node.get_child(2)) i += 3 else: raise AssertionError("unknown node in argument list") @@ -630,24 +631,24 @@ def handle_keywordonly_args(self, arguments_node, i, kwonly, kwdefaults): if kwonly is None: self.error("named arguments must follows bare *", - arguments_node.children[i]) - child_count = len(arguments_node.children) + arguments_node.get_child(i)) + child_count = arguments_node.num_children() while i < child_count: - arg = arguments_node.children[i] + arg = arguments_node.get_child(i) arg_type = arg.type if arg_type == syms.vfpdef or arg_type == syms.tfpdef: if (i + 1 < child_count and - arguments_node.children[i + 1].type == tokens.EQUAL): - expr = self.handle_expr(arguments_node.children[i + 2]) + arguments_node.get_child(i + 1).type == tokens.EQUAL): + expr = self.handle_expr(arguments_node.get_child(i + 2)) kwdefaults.append(expr) i += 2 else: kwdefaults.append(None) ann = None - if len(arg.children) == 3: - ann = self.handle_expr(arg.children[2]) - name_node = arg.children[0] - argname = name_node.value + if 
arg.num_children() == 3: + ann = self.handle_expr(arg.get_child(2)) + name_node = arg.get_child(0) + argname = name_node.get_value() argname = self.new_identifier(argname) self.check_forbidden_name(argname, name_node) kwonly.append(ast.arg(argname, ann)) @@ -657,31 +658,31 @@ return i def handle_arg(self, arg_node): - name_node = arg_node.children[0] - name = self.new_identifier(name_node.value) + name_node = arg_node.get_child(0) + name = self.new_identifier(name_node.get_value()) self.check_forbidden_name(name, arg_node) ann = None - if len(arg_node.children) == 3: - ann = self.handle_expr(arg_node.children[2]) + if arg_node.num_children() == 3: + ann = self.handle_expr(arg_node.get_child(2)) return ast.arg(name, ann) def handle_stmt(self, stmt): stmt_type = stmt.type if stmt_type == syms.stmt: - stmt = stmt.children[0] + stmt = stmt.get_child(0) stmt_type = stmt.type if stmt_type == syms.simple_stmt: - stmt = stmt.children[0] + stmt = stmt.get_child(0) stmt_type = stmt.type if stmt_type == syms.small_stmt: - stmt = stmt.children[0] + stmt = stmt.get_child(0) stmt_type = stmt.type if stmt_type == syms.expr_stmt: return self.handle_expr_stmt(stmt) elif stmt_type == syms.del_stmt: return self.handle_del_stmt(stmt) elif stmt_type == syms.pass_stmt: - return ast.Pass(stmt.lineno, stmt.column) + return ast.Pass(stmt.get_lineno(), stmt.get_column()) elif stmt_type == syms.flow_stmt: return self.handle_flow_stmt(stmt) elif stmt_type == syms.import_stmt: @@ -695,7 +696,7 @@ else: raise AssertionError("unhandled small statement") elif stmt_type == syms.compound_stmt: - stmt = stmt.children[0] + stmt = stmt.get_child(0) stmt_type = stmt.type if stmt_type == syms.if_stmt: return self.handle_if_stmt(stmt) @@ -719,95 +720,95 @@ raise AssertionError("unknown statment type") def handle_expr_stmt(self, stmt): - if len(stmt.children) == 1: - expression = self.handle_testlist(stmt.children[0]) - return ast.Expr(expression, stmt.lineno, stmt.column) - elif stmt.children[1].type == 
syms.augassign: + if stmt.num_children() == 1: + expression = self.handle_testlist(stmt.get_child(0)) + return ast.Expr(expression, stmt.get_lineno(), stmt.get_column()) + elif stmt.get_child(1).type == syms.augassign: # Augmented assignment. - target_child = stmt.children[0] + target_child = stmt.get_child(0) target_expr = self.handle_testlist(target_child) self.set_context(target_expr, ast.Store) - value_child = stmt.children[2] + value_child = stmt.get_child(2) if value_child.type == syms.testlist: value_expr = self.handle_testlist(value_child) else: value_expr = self.handle_expr(value_child) - op_str = stmt.children[1].children[0].value + op_str = stmt.get_child(1).get_child(0).get_value() operator = augassign_operator_map[op_str] return ast.AugAssign(target_expr, operator, value_expr, - stmt.lineno, stmt.column) + stmt.get_lineno(), stmt.get_column()) else: # Normal assignment. targets = [] - for i in range(0, len(stmt.children) - 2, 2): - target_node = stmt.children[i] + for i in range(0, stmt.num_children() - 2, 2): + target_node = stmt.get_child(i) if target_node.type == syms.yield_expr: self.error("assignment to yield expression not possible", target_node) target_expr = self.handle_testlist(target_node) self.set_context(target_expr, ast.Store) targets.append(target_expr) - value_child = stmt.children[-1] + value_child = stmt.get_child(-1) if value_child.type == syms.testlist_star_expr: value_expr = self.handle_testlist(value_child) else: value_expr = self.handle_expr(value_child) - return ast.Assign(targets, value_expr, stmt.lineno, stmt.column) + return ast.Assign(targets, value_expr, stmt.get_lineno(), stmt.get_column()) def get_expression_list(self, tests): - return [self.handle_expr(tests.children[i]) - for i in range(0, len(tests.children), 2)] + return [self.handle_expr(tests.get_child(i)) + for i in range(0, tests.num_children(), 2)] def handle_testlist(self, tests): - if len(tests.children) == 1: - return self.handle_expr(tests.children[0]) + if 
tests.num_children() == 1: + return self.handle_expr(tests.get_child(0)) else: elts = self.get_expression_list(tests) - return ast.Tuple(elts, ast.Load, tests.lineno, tests.column) + return ast.Tuple(elts, ast.Load, tests.get_lineno(), tests.get_column()) def handle_expr(self, expr_node): # Loop until we return something. while True: expr_node_type = expr_node.type if expr_node_type == syms.test or expr_node_type == syms.test_nocond: - first_child = expr_node.children[0] + first_child = expr_node.get_child(0) if first_child.type in (syms.lambdef, syms.lambdef_nocond): return self.handle_lambdef(first_child) - elif len(expr_node.children) > 1: + elif expr_node.num_children() > 1: return self.handle_ifexp(expr_node) else: expr_node = first_child elif expr_node_type == syms.or_test or \ expr_node_type == syms.and_test: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue - seq = [self.handle_expr(expr_node.children[i]) - for i in range(0, len(expr_node.children), 2)] + seq = [self.handle_expr(expr_node.get_child(i)) + for i in range(0, expr_node.num_children(), 2)] if expr_node_type == syms.or_test: op = ast.Or else: op = ast.And - return ast.BoolOp(op, seq, expr_node.lineno, expr_node.column) + return ast.BoolOp(op, seq, expr_node.get_lineno(), expr_node.get_column()) elif expr_node_type == syms.not_test: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue - expr = self.handle_expr(expr_node.children[1]) - return ast.UnaryOp(ast.Not, expr, expr_node.lineno, - expr_node.column) + expr = self.handle_expr(expr_node.get_child(1)) + return ast.UnaryOp(ast.Not, expr, expr_node.get_lineno(), + expr_node.get_column()) elif expr_node_type == syms.comparison: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = 
expr_node.get_child(0) continue operators = [] operands = [] - expr = self.handle_expr(expr_node.children[0]) - for i in range(1, len(expr_node.children), 2): - operators.append(self.handle_comp_op(expr_node.children[i])) - operands.append(self.handle_expr(expr_node.children[i + 1])) - return ast.Compare(expr, operators, operands, expr_node.lineno, - expr_node.column) + expr = self.handle_expr(expr_node.get_child(0)) + for i in range(1, expr_node.num_children(), 2): + operators.append(self.handle_comp_op(expr_node.get_child(i))) + operands.append(self.handle_expr(expr_node.get_child(i + 1))) + return ast.Compare(expr, operators, operands, expr_node.get_lineno(), + expr_node.get_column()) elif expr_node_type == syms.star_expr: return self.handle_star_expr(expr_node) elif expr_node_type == syms.expr or \ @@ -816,27 +817,27 @@ expr_node_type == syms.shift_expr or \ expr_node_type == syms.arith_expr or \ expr_node_type == syms.term: - if len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue return self.handle_binop(expr_node) elif expr_node_type == syms.yield_expr: is_from = False - if len(expr_node.children) > 1: - arg_node = expr_node.children[1] # yield arg - if len(arg_node.children) == 2: + if expr_node.num_children() > 1: + arg_node = expr_node.get_child(1) # yield arg + if arg_node.num_children() == 2: is_from = True - expr = self.handle_expr(arg_node.children[1]) + expr = self.handle_expr(arg_node.get_child(1)) else: - expr = self.handle_testlist(arg_node.children[0]) + expr = self.handle_testlist(arg_node.get_child(0)) else: expr = None if is_from: - return ast.YieldFrom(expr, expr_node.lineno, expr_node.column) - return ast.Yield(expr, expr_node.lineno, expr_node.column) + return ast.YieldFrom(expr, expr_node.get_lineno(), expr_node.get_column()) + return ast.Yield(expr, expr_node.get_lineno(), expr_node.get_column()) elif expr_node_type == syms.factor: - if 
len(expr_node.children) == 1: - expr_node = expr_node.children[0] + if expr_node.num_children() == 1: + expr_node = expr_node.get_child(0) continue return self.handle_factor(expr_node) elif expr_node_type == syms.power: @@ -845,29 +846,29 @@ raise AssertionError("unknown expr") def handle_star_expr(self, star_expr_node): - expr = self.handle_expr(star_expr_node.children[1]) - return ast.Starred(expr, ast.Load, star_expr_node.lineno, - star_expr_node.column) + expr = self.handle_expr(star_expr_node.get_child(1)) + return ast.Starred(expr, ast.Load, star_expr_node.get_lineno(), + star_expr_node.get_column()) def handle_lambdef(self, lambdef_node): - expr = self.handle_expr(lambdef_node.children[-1]) - if len(lambdef_node.children) == 3: + expr = self.handle_expr(lambdef_node.get_child(-1)) + if lambdef_node.num_children() == 3: args = ast.arguments(None, None, None, None, None, None, None, None) else: - args = self.handle_arguments(lambdef_node.children[1]) - return ast.Lambda(args, expr, lambdef_node.lineno, lambdef_node.column) + args = self.handle_arguments(lambdef_node.get_child(1)) + return ast.Lambda(args, expr, lambdef_node.get_lineno(), lambdef_node.get_column()) def handle_ifexp(self, if_expr_node): - body = self.handle_expr(if_expr_node.children[0]) - expression = self.handle_expr(if_expr_node.children[2]) - otherwise = self.handle_expr(if_expr_node.children[4]) - return ast.IfExp(expression, body, otherwise, if_expr_node.lineno, - if_expr_node.column) + body = self.handle_expr(if_expr_node.get_child(0)) + expression = self.handle_expr(if_expr_node.get_child(2)) + otherwise = self.handle_expr(if_expr_node.get_child(4)) + return ast.IfExp(expression, body, otherwise, if_expr_node.get_lineno(), + if_expr_node.get_column()) def handle_comp_op(self, comp_op_node): - comp_node = comp_op_node.children[0] + comp_node = comp_op_node.get_child(0) comp_type = comp_node.type - if len(comp_op_node.children) == 1: + if comp_op_node.num_children() == 1: if comp_type == 
tokens.LESS: return ast.Lt elif comp_type == tokens.GREATER: @@ -880,46 +881,47 @@ return ast.GtE elif comp_type == tokens.NOTEQUAL: flufl = self.compile_info.flags & consts.CO_FUTURE_BARRY_AS_BDFL - if flufl and comp_node.value == '!=': + if flufl and comp_node.get_value() == '!=': self.error('invalid comparison', comp_node) - elif not flufl and comp_node.value == '<>': + elif not flufl and comp_node.get_value() == '<>': self.error('invalid comparison', comp_node) return ast.NotEq elif comp_type == tokens.NAME: - if comp_node.value == "is": + if comp_node.get_value() == "is": return ast.Is - elif comp_node.value == "in": + elif comp_node.get_value() == "in": return ast.In else: raise AssertionError("invalid comparison") else: raise AssertionError("invalid comparison") else: - if comp_op_node.children[1].value == "in": + if comp_op_node.get_child(1).get_value() == "in": return ast.NotIn - elif comp_node.value == "is": + elif comp_node.get_value() == "is": return ast.IsNot else: raise AssertionError("invalid comparison") def handle_binop(self, binop_node): - left = self.handle_expr(binop_node.children[0]) - right = self.handle_expr(binop_node.children[2]) - op = operator_map(binop_node.children[1].type) - result = ast.BinOp(left, op, right, binop_node.lineno, - binop_node.column) - number_of_ops = (len(binop_node.children) - 1) / 2 + left = self.handle_expr(binop_node.get_child(0)) + right = self.handle_expr(binop_node.get_child(2)) + op = operator_map(binop_node.get_child(1).type) + result = ast.BinOp(left, op, right, binop_node.get_lineno(), + binop_node.get_column()) + number_of_ops = (binop_node.num_children() - 1) / 2 for i in range(1, number_of_ops): - op_node = binop_node.children[i * 2 + 1] + op_node = binop_node.get_child(i * 2 + 1) op = operator_map(op_node.type) - sub_right = self.handle_expr(binop_node.children[i * 2 + 2]) - result = ast.BinOp(result, op, sub_right, op_node.lineno, - op_node.column) + sub_right = self.handle_expr(binop_node.get_child(i * 
2 + 2)) + result = ast.BinOp(result, op, sub_right, op_node.get_lineno(), + op_node.get_column()) return result def handle_factor(self, factor_node): - expr = self.handle_expr(factor_node.children[1]) - op_type = factor_node.children[0].type + from pypy.interpreter.pyparser.parser import Terminal + expr = self.handle_expr(factor_node.get_child(1)) + op_type = factor_node.get_child(0).type if op_type == tokens.PLUS: op = ast.UAdd elif op_type == tokens.MINUS: @@ -928,29 +930,29 @@ op = ast.Invert else: raise AssertionError("invalid factor node") - return ast.UnaryOp(op, expr, factor_node.lineno, factor_node.column) + return ast.UnaryOp(op, expr, factor_node.get_lineno(), factor_node.get_column()) def handle_power(self, power_node): - atom_expr = self.handle_atom(power_node.children[0]) - if len(power_node.children) == 1: + atom_expr = self.handle_atom(power_node.get_child(0)) + if power_node.num_children() == 1: return atom_expr - for i in range(1, len(power_node.children)): - trailer = power_node.children[i] + for i in range(1, power_node.num_children()): + trailer = power_node.get_child(i) if trailer.type != syms.trailer: break tmp_atom_expr = self.handle_trailer(trailer, atom_expr) tmp_atom_expr.lineno = atom_expr.lineno tmp_atom_expr.col_offset = atom_expr.col_offset atom_expr = tmp_atom_expr - if power_node.children[-1].type == syms.factor: - right = self.handle_expr(power_node.children[-1]) - atom_expr = ast.BinOp(atom_expr, ast.Pow, right, power_node.lineno, - power_node.column) + if power_node.get_child(-1).type == syms.factor: + right = self.handle_expr(power_node.get_child(-1)) + atom_expr = ast.BinOp(atom_expr, ast.Pow, right, power_node.get_lineno(), + power_node.get_column()) return atom_expr def handle_slice(self, slice_node): - first_child = slice_node.children[0] - if len(slice_node.children) == 1 and first_child.type == syms.test: + first_child = slice_node.get_child(0) + if slice_node.num_children() == 1 and first_child.type == syms.test: index = 
self.handle_expr(first_child) return ast.Index(index) lower = None @@ -959,68 +961,69 @@ if first_child.type == syms.test: lower = self.handle_expr(first_child) if first_child.type == tokens.COLON: - if len(slice_node.children) > 1: - second_child = slice_node.children[1] + if slice_node.num_children() > 1: + second_child = slice_node.get_child(1) if second_child.type == syms.test: upper = self.handle_expr(second_child) - elif len(slice_node.children) > 2: - third_child = slice_node.children[2] + elif slice_node.num_children() > 2: + third_child = slice_node.get_child(2) if third_child.type == syms.test: upper = self.handle_expr(third_child) - last_child = slice_node.children[-1] + last_child = slice_node.get_child(-1) if last_child.type == syms.sliceop: - if len(last_child.children) != 1: - step_child = last_child.children[1] + if last_child.num_children() != 1: + step_child = last_child.get_child(1) if step_child.type == syms.test: step = self.handle_expr(step_child) return ast.Slice(lower, upper, step) def handle_trailer(self, trailer_node, left_expr): - first_child = trailer_node.children[0] + first_child = trailer_node.get_child(0) if first_child.type == tokens.LPAR: - if len(trailer_node.children) == 2: + if trailer_node.num_children() == 2: return ast.Call(left_expr, None, None, None, None, - trailer_node.lineno, trailer_node.column) + trailer_node.get_lineno(), trailer_node.get_column()) else: - return self.handle_call(trailer_node.children[1], left_expr) + return self.handle_call(trailer_node.get_child(1), left_expr) elif first_child.type == tokens.DOT: - attr = self.new_identifier(trailer_node.children[1].value) + attr = self.new_identifier(trailer_node.get_child(1).get_value()) return ast.Attribute(left_expr, attr, ast.Load, - trailer_node.lineno, trailer_node.column) + trailer_node.get_lineno(), trailer_node.get_column()) else: - middle = trailer_node.children[1] - if len(middle.children) == 1: - slice = self.handle_slice(middle.children[0]) + middle = 
trailer_node.get_child(1) + if middle.num_children() == 1: + slice = self.handle_slice(middle.get_child(0)) return ast.Subscript(left_expr, slice, ast.Load, - middle.lineno, middle.column) + middle.get_lineno(), middle.get_column()) slices = [] simple = True - for i in range(0, len(middle.children), 2): - slc = self.handle_slice(middle.children[i]) + for i in range(0, middle.num_children(), 2): + slc = self.handle_slice(middle.get_child(i)) if not isinstance(slc, ast.Index): simple = False slices.append(slc) if not simple: ext_slice = ast.ExtSlice(slices) return ast.Subscript(left_expr, ext_slice, ast.Load, - middle.lineno, middle.column) + middle.get_lineno(), middle.get_column()) elts = [] for idx in slices: assert isinstance(idx, ast.Index) elts.append(idx.value) - tup = ast.Tuple(elts, ast.Load, middle.lineno, middle.column) + tup = ast.Tuple(elts, ast.Load, middle.get_lineno(), middle.get_column()) return ast.Subscript(left_expr, ast.Index(tup), ast.Load, - middle.lineno, middle.column) + middle.get_lineno(), middle.get_column()) def handle_call(self, args_node, callable_expr): arg_count = 0 keyword_count = 0 generator_count = 0 - for argument in args_node.children: + for i in range(args_node.num_children()): + argument = args_node.get_child(i) if argument.type == syms.argument: - if len(argument.children) == 1: + if argument.num_children() == 1: arg_count += 1 - elif argument.children[1].type == syms.comp_for: + elif argument.get_child(1).type == syms.comp_for: generator_count += 1 else: keyword_count += 1 @@ -1035,13 +1038,13 @@ used_keywords = {} variable_arg = None keywords_arg = None - child_count = len(args_node.children) + child_count = args_node.num_children() i = 0 while i < child_count: - argument = args_node.children[i] + argument = args_node.get_child(i) if argument.type == syms.argument: - if len(argument.children) == 1: - expr_node = argument.children[0] + if argument.num_children() == 1: + expr_node = argument.get_child(0) if keywords: 
self.error("non-keyword arg after keyword arg", expr_node) @@ -1049,10 +1052,10 @@ self.error("only named arguments may follow " "*expression", expr_node) args.append(self.handle_expr(expr_node)) - elif argument.children[1].type == syms.comp_for: + elif argument.get_child(1).type == syms.comp_for: args.append(self.handle_genexp(argument)) else: - keyword_node = argument.children[0] + keyword_node = argument.get_child(0) keyword_expr = self.handle_expr(keyword_node) if isinstance(keyword_expr, ast.Lambda): self.error("lambda cannot contain assignment", @@ -1065,13 +1068,13 @@ self.error("keyword argument repeated", keyword_node) used_keywords[keyword] = None self.check_forbidden_name(keyword, keyword_node) - keyword_value = self.handle_expr(argument.children[2]) + keyword_value = self.handle_expr(argument.get_child(2)) keywords.append(ast.keyword(keyword, keyword_value)) elif argument.type == tokens.STAR: - variable_arg = self.handle_expr(args_node.children[i + 1]) + variable_arg = self.handle_expr(args_node.get_child(i + 1)) i += 1 elif argument.type == tokens.DOUBLESTAR: - keywords_arg = self.handle_expr(args_node.children[i + 1]) + keywords_arg = self.handle_expr(args_node.get_child(i + 1)) i += 1 i += 1 if not args: @@ -1127,18 +1130,20 @@ return self.space.call_function(self.space.w_float, w_num_str) def handle_atom(self, atom_node): - first_child = atom_node.children[0] + first_child = atom_node.get_child(0) first_child_type = first_child.type if first_child_type == tokens.NAME: - name = self.new_identifier(first_child.value) - return ast.Name(name, ast.Load, first_child.lineno, - first_child.column) + name = self.new_identifier(first_child.get_value()) + return ast.Name(name, ast.Load, first_child.get_lineno(), + first_child.get_column()) elif first_child_type == tokens.STRING: space = self.space encoding = self.compile_info.encoding try: - sub_strings_w = [parsestring.parsestr(space, encoding, s.value) - for s in atom_node.children] + sub_strings_w = [ + 
parsestring.parsestr( + space, encoding, atom_node.get_child(i).get_value()) + for i in range(atom_node.num_children())] except error.OperationError, e: if not (e.match(space, space.w_UnicodeError) or e.match(space, space.w_ValueError)): @@ -1159,58 +1164,58 @@ # UnicodeError in literal: turn into SyntaxError strdata = space.isinstance_w(w_string, space.w_unicode) node = ast.Str if strdata else ast.Bytes - return node(w_string, atom_node.lineno, atom_node.column) + return node(w_string, atom_node.get_lineno(), atom_node.get_column()) elif first_child_type == tokens.NUMBER: - num_value = self.parse_number(first_child.value) - return ast.Num(num_value, atom_node.lineno, atom_node.column) + num_value = self.parse_number(first_child.get_value()) + return ast.Num(num_value, atom_node.get_lineno(), atom_node.get_column()) elif first_child_type == tokens.ELLIPSIS: - return ast.Ellipsis(atom_node.lineno, atom_node.column) + return ast.Ellipsis(atom_node.get_lineno(), atom_node.get_column()) elif first_child_type == tokens.LPAR: - second_child = atom_node.children[1] + second_child = atom_node.get_child(1) if second_child.type == tokens.RPAR: - return ast.Tuple(None, ast.Load, atom_node.lineno, - atom_node.column) + return ast.Tuple(None, ast.Load, atom_node.get_lineno(), + atom_node.get_column()) elif second_child.type == syms.yield_expr: return self.handle_expr(second_child) return self.handle_testlist_gexp(second_child) elif first_child_type == tokens.LSQB: - second_child = atom_node.children[1] + second_child = atom_node.get_child(1) if second_child.type == tokens.RSQB: - return ast.List(None, ast.Load, atom_node.lineno, - atom_node.column) - if len(second_child.children) == 1 or \ - second_child.children[1].type == tokens.COMMA: + return ast.List(None, ast.Load, atom_node.get_lineno(), + atom_node.get_column()) + if second_child.num_children() == 1 or \ + second_child.get_child(1).type == tokens.COMMA: elts = self.get_expression_list(second_child) - return 
ast.List(elts, ast.Load, atom_node.lineno, - atom_node.column) + return ast.List(elts, ast.Load, atom_node.get_lineno(), + atom_node.get_column()) return self.handle_listcomp(second_child) elif first_child_type == tokens.LBRACE: - maker = atom_node.children[1] + maker = atom_node.get_child(1) if maker.type == tokens.RBRACE: - return ast.Dict(None, None, atom_node.lineno, atom_node.column) - n_maker_children = len(maker.children) - if n_maker_children == 1 or maker.children[1].type == tokens.COMMA: + return ast.Dict(None, None, atom_node.get_lineno(), atom_node.get_column()) + n_maker_children = maker.num_children() + if n_maker_children == 1 or maker.get_child(1).type == tokens.COMMA: elts = [] for i in range(0, n_maker_children, 2): - elts.append(self.handle_expr(maker.children[i])) - return ast.Set(elts, atom_node.lineno, atom_node.column) - if maker.children[1].type == syms.comp_for: + elts.append(self.handle_expr(maker.get_child(i))) + return ast.Set(elts, atom_node.get_lineno(), atom_node.get_column()) + if maker.get_child(1).type == syms.comp_for: return self.handle_setcomp(maker) if (n_maker_children > 3 and - maker.children[3].type == syms.comp_for): + maker.get_child(3).type == syms.comp_for): return self.handle_dictcomp(maker) keys = [] values = [] for i in range(0, n_maker_children, 4): - keys.append(self.handle_expr(maker.children[i])) - values.append(self.handle_expr(maker.children[i + 2])) - return ast.Dict(keys, values, atom_node.lineno, atom_node.column) + keys.append(self.handle_expr(maker.get_child(i))) + values.append(self.handle_expr(maker.get_child(i + 2))) + return ast.Dict(keys, values, atom_node.get_lineno(), atom_node.get_column()) else: raise AssertionError("unknown atom") def handle_testlist_gexp(self, gexp_node): - if len(gexp_node.children) > 1 and \ - gexp_node.children[1].type == syms.comp_for: + if gexp_node.num_children() > 1 and \ + gexp_node.get_child(1).type == syms.comp_for: return self.handle_genexp(gexp_node) return 
self.handle_testlist(gexp_node) @@ -1219,18 +1224,18 @@ current_for = comp_node while True: count += 1 - if len(current_for.children) == 5: - current_iter = current_for.children[4] + if current_for.num_children() == 5: + current_iter = current_for.get_child(4) else: return count while True: - first_child = current_iter.children[0] + first_child = current_iter.get_child(0) if first_child.type == syms.comp_for: - current_for = current_iter.children[0] + current_for = current_iter.get_child(0) break elif first_child.type == syms.comp_if: - if len(first_child.children) == 3: - current_iter = first_child.children[2] + if first_child.num_children() == 3: + current_iter = first_child.get_child(2) else: return count else: @@ -1239,23 +1244,23 @@ def count_comp_ifs(self, iter_node): count = 0 while True: - first_child = iter_node.children[0] + first_child = iter_node.get_child(0) if first_child.type == syms.comp_for: return count count += 1 - if len(first_child.children) == 2: + if first_child.num_children() == 2: return count - iter_node = first_child.children[2] + iter_node = first_child.get_child(2) def comprehension_helper(self, comp_node): fors_count = self.count_comp_fors(comp_node) comps = [] for i in range(fors_count): - for_node = comp_node.children[1] + for_node = comp_node.get_child(1) for_targets = self.handle_exprlist(for_node, ast.Store) - expr = self.handle_expr(comp_node.children[3]) + expr = self.handle_expr(comp_node.get_child(3)) assert isinstance(expr, ast.expr) - if len(for_node.children) == 1: + if for_node.num_children() == 1: comp = ast.comprehension(for_targets[0], expr, None) else: # Modified in python2.7, see http://bugs.python.org/issue6704 @@ -1266,53 +1271,53 @@ line = expr_node.lineno target = ast.Tuple(for_targets, ast.Store, line, col) comp = ast.comprehension(target, expr, None) - if len(comp_node.children) == 5: - comp_node = comp_iter = comp_node.children[4] + if comp_node.num_children() == 5: + comp_node = comp_iter = 
comp_node.get_child(4) assert comp_iter.type == syms.comp_iter ifs_count = self.count_comp_ifs(comp_iter) if ifs_count: ifs = [] for j in range(ifs_count): - comp_node = comp_if = comp_iter.children[0] - ifs.append(self.handle_expr(comp_if.children[1])) - if len(comp_if.children) == 3: - comp_node = comp_iter = comp_if.children[2] + comp_node = comp_if = comp_iter.get_child(0) + ifs.append(self.handle_expr(comp_if.get_child(1))) + if comp_if.num_children() == 3: + comp_node = comp_iter = comp_if.get_child(2) comp.ifs = ifs if comp_node.type == syms.comp_iter: - comp_node = comp_node.children[0] + comp_node = comp_node.get_child(0) assert isinstance(comp, ast.comprehension) comps.append(comp) return comps def handle_genexp(self, genexp_node): - elt = self.handle_expr(genexp_node.children[0]) - comps = self.comprehension_helper(genexp_node.children[1]) - return ast.GeneratorExp(elt, comps, genexp_node.lineno, - genexp_node.column) + elt = self.handle_expr(genexp_node.get_child(0)) + comps = self.comprehension_helper(genexp_node.get_child(1)) + return ast.GeneratorExp(elt, comps, genexp_node.get_lineno(), + genexp_node.get_column()) def handle_listcomp(self, listcomp_node): - elt = self.handle_expr(listcomp_node.children[0]) - comps = self.comprehension_helper(listcomp_node.children[1]) - return ast.ListComp(elt, comps, listcomp_node.lineno, - listcomp_node.column) + elt = self.handle_expr(listcomp_node.get_child(0)) + comps = self.comprehension_helper(listcomp_node.get_child(1)) + return ast.ListComp(elt, comps, listcomp_node.get_lineno(), + listcomp_node.get_column()) def handle_setcomp(self, set_maker): - elt = self.handle_expr(set_maker.children[0]) - comps = self.comprehension_helper(set_maker.children[1]) - return ast.SetComp(elt, comps, set_maker.lineno, - set_maker.column) + elt = self.handle_expr(set_maker.get_child(0)) + comps = self.comprehension_helper(set_maker.get_child(1)) + return ast.SetComp(elt, comps, set_maker.get_lineno(), + 
set_maker.get_column()) def handle_dictcomp(self, dict_maker): - key = self.handle_expr(dict_maker.children[0]) - value = self.handle_expr(dict_maker.children[2]) - comps = self.comprehension_helper(dict_maker.children[3]) - return ast.DictComp(key, value, comps, dict_maker.lineno, - dict_maker.column) + key = self.handle_expr(dict_maker.get_child(0)) + value = self.handle_expr(dict_maker.get_child(2)) + comps = self.comprehension_helper(dict_maker.get_child(3)) + return ast.DictComp(key, value, comps, dict_maker.get_lineno(), + dict_maker.get_column()) def handle_exprlist(self, exprlist, context): exprs = [] - for i in range(0, len(exprlist.children), 2): - child = exprlist.children[i] + for i in range(0, exprlist.num_children(), 2): + child = exprlist.get_child(i) expr = self.handle_expr(child) self.set_context(expr, context) exprs.append(expr) diff --git a/pypy/interpreter/pyparser/parser.py b/pypy/interpreter/pyparser/parser.py --- a/pypy/interpreter/pyparser/parser.py +++ b/pypy/interpreter/pyparser/parser.py @@ -44,26 +44,125 @@ class Node(object): - __slots__ = "type value children lineno column".split() + __slots__ = ("type", ) - def __init__(self, type, value, children, lineno, column): + def __init__(self, type): self.type = type + + def __eq__(self, other): + raise NotImplementedError("abstract base class") + + def __ne__(self, other): + return not self == other + + def get_value(self): + return None + + def get_child(self, i): + raise NotImplementedError("abstract base class") + + def num_children(self): + return 0 + + def append_child(self, child): + raise NotImplementedError("abstract base class") + + def get_lineno(self): + raise NotImplementedError("abstract base class") + + def get_column(self): + raise NotImplementedError("abstract base class") + + +class Terminal(Node): + __slots__ = ("value", "lineno", "column") + def __init__(self, type, value, lineno, column): + Node.__init__(self, type) self.value = value - self.children = children 
self.lineno = lineno self.column = column + def __repr__(self): + return "Terminal(type=%s, value=%r)" % (self.type, self.value) + def __eq__(self, other): # For tests. - return (self.type == other.type and - self.value == other.value and - self.children == other.children) + return (type(self) == type(other) and + self.type == other.type and + self.value == other.value) + + def get_value(self): + return self.value + + def get_lineno(self): + return self.lineno + + def get_column(self): + return self.column + + +class AbstractNonterminal(Node): + __slots__ = () + + def get_lineno(self): + return self.get_child(0).get_lineno() + + def get_column(self): + return self.get_child(0).get_column() + + def __eq__(self, other): + # For tests. + # grumble, annoying + if not isinstance(other, AbstractNonterminal): + return False + if self.type != other.type: + return False + if self.num_children() != other.num_children(): + return False + for i in range(self.num_children()): + if self.get_child(i) != other.get_child(i): + return False + return True + + +class Nonterminal(AbstractNonterminal): + __slots__ = ("_children", ) + def __init__(self, type, children): + Node.__init__(self, type) + self._children = children def __repr__(self): - if self.value is None: - return "Node(type=%s, children=%r)" % (self.type, self.children) - else: - return "Node(type=%s, value=%r)" % (self.type, self.value) + return "Nonterminal(type=%s, children=%r)" % (self.type, self._children) + + def get_child(self, i): + return self._children[i] + + def num_children(self): + return len(self._children) + + def append_child(self, child): + self._children.append(child) + + +class Nonterminal1(AbstractNonterminal): + __slots__ = ("_child", ) + def __init__(self, type, child): + Node.__init__(self, type) + self._child = child + + def __repr__(self): + return "Nonterminal(type=%s, children=[%r])" % (self.type, self._child) + + def get_child(self, i): + assert i == 0 or i == -1 + return self._child + + def 
num_children(self): + return 1 + + def append_child(self, child): + assert 0, "should be unreachable" + class ParseError(Exception): @@ -97,7 +196,7 @@ if start == -1: start = self.grammar.start self.root = None - current_node = Node(start, None, [], 0, 0) + current_node = Nonterminal(start, []) self.stack = [] self.stack.append((self.grammar.dfas[start - 256], 0, current_node)) @@ -164,14 +263,14 @@ def shift(self, next_state, token_type, value, lineno, column): """Shift a non-terminal and prepare for the next state.""" dfa, state, node = self.stack[-1] - new_node = Node(token_type, value, None, lineno, column) - node.children.append(new_node) + new_node = Terminal(token_type, value, lineno, column) + node.append_child(new_node) self.stack[-1] = (dfa, next_state, node) def push(self, next_dfa, next_state, node_type, lineno, column): """Push a terminal and adjust the current state.""" dfa, state, node = self.stack[-1] - new_node = Node(node_type, None, [], lineno, column) + new_node = Nonterminal(node_type, []) self.stack[-1] = (dfa, next_state, node) self.stack.append((next_dfa, 0, new_node)) @@ -179,6 +278,10 @@ """Pop an entry off the stack and make its node a child of the last.""" dfa, state, node = self.stack.pop() if self.stack: - self.stack[-1][2].children.append(node) + # we are now done with node, so we can store it more efficiently if + # it has just one child + if node.num_children() == 1: + node = Nonterminal1(node.type, node.get_child(0)) + self.stack[-1][2].append_child(node) else: self.root = node diff --git a/pypy/interpreter/pyparser/pygram.py b/pypy/interpreter/pyparser/pygram.py --- a/pypy/interpreter/pyparser/pygram.py +++ b/pypy/interpreter/pyparser/pygram.py @@ -28,8 +28,11 @@ class _Symbols(object): pass +rev_lookup = {} for sym_name, idx in python_grammar.symbol_ids.iteritems(): setattr(_Symbols, sym_name, idx) + rev_lookup[idx] = sym_name syms = _Symbols() +syms._rev_lookup = rev_lookup # for debugging del _get_python_grammar, _Tokens, 
tok_name, sym_name, idx diff --git a/pypy/interpreter/pyparser/test/test_parser.py b/pypy/interpreter/pyparser/test/test_parser.py --- a/pypy/interpreter/pyparser/test/test_parser.py +++ b/pypy/interpreter/pyparser/test/test_parser.py @@ -52,24 +52,23 @@ value = "\n" else: value = "" - children = None + n = parser.Terminal(tp, value, 0, 0) else: tp = gram.symbol_ids[data[0]] - value = None children = [] - n = parser.Node(tp, value, children, 0, 0) + n = parser.Nonterminal(tp, children) new_indent = count_indent(line) if new_indent >= last_indent: if new_indent == last_indent and node_stack: node_stack.pop() if node_stack: - node_stack[-1].children.append(n) + node_stack[-1].append_child(n) node_stack.append(n) else: diff = last_indent - new_indent pop_nodes = diff // 4 + 1 del node_stack[-pop_nodes:] - node_stack[-1].children.append(n) + node_stack[-1].append_child(n) node_stack.append(n) last_indent = new_indent return node_stack[0] diff --git a/pypy/module/parser/pyparser.py b/pypy/module/parser/pyparser.py --- a/pypy/module/parser/pyparser.py +++ b/pypy/module/parser/pyparser.py @@ -15,21 +15,21 @@ @specialize.arg(3) def _build_app_tree(self, space, node, seq_maker, with_lineno, with_column): - if node.children is not None: - seq_w = [None]*(len(node.children) + 1) + if node.num_children(): + seq_w = [None]*(node.num_children() + 1) seq_w[0] = space.wrap(node.type) - for i in range(1, len(node.children) + 1): - seq_w[i] = self._build_app_tree(space, node.children[i - 1], + for i in range(1, node.num_children() + 1): + seq_w[i] = self._build_app_tree(space, node.get_child(i - 1), seq_maker, with_lineno, with_column) else: seq_w = [None]*(2 + with_lineno + with_column) seq_w[0] = space.wrap(node.type) - seq_w[1] = space.wrap(node.value) + seq_w[1] = space.wrap(node.get_value()) if with_lineno: - seq_w[2] = space.wrap(node.lineno) + seq_w[2] = space.wrap(node.get_lineno()) if with_column: - seq_w[3] = space.wrap(node.column) + seq_w[3] = 
space.wrap(node.get_column()) return seq_maker(seq_w) def descr_issuite(self, space): From pypy.commits at gmail.com Wed Apr 27 20:35:09 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 27 Apr 2016 17:35:09 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: hg merge default (before cpyext-for-merge merge) Message-ID: <57215abd.2171c20a.2c620.411b@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r83997:c1ed6e5f8a6d Date: 2016-04-28 01:34 +0100 http://bitbucket.org/pypy/pypy/changeset/c1ed6e5f8a6d/ Log: hg merge default (before cpyext-for-merge merge) diff too long, truncating to 2000 out of 4376 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -20,3 +20,4 @@ 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1 +3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1 diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -5,15 +5,23 @@ overly detailed -1. check out the branch vendor/stdlib +0. make sure your working dir is clean +1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k) + or create branch vendor/stdlib-3-* 2. upgrade the files there + 2a. remove lib-python/2.7/ or lib-python/3/ + 2b. copy the files from the cpython repo + 2c. hg add lib-python/2.7/ or lib-python/3/ + 2d. hg remove --after + 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'` + 2f. fix copies / renames manually by running `hg copy --after ` for each copied file 3. update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit -5. update to default/py3k +5. update to default / py3k 6. create a integration branch for the new stdlib (just hg branch stdlib-$version) -7. merge vendor/stdlib +7. merge vendor/stdlib or vendor/stdlib-3-* 8. commit 10. 
fix issues 11. commit --close-branch -12. merge to default +12. merge to default / py3k diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -205,15 +205,6 @@ BoolOption("withstrbuf", "use strings optimized for addition (ver 2)", default=False), - BoolOption("withprebuiltchar", - "use prebuilt single-character string objects", - default=False), - - BoolOption("sharesmallstr", - "always reuse the prebuilt string objects " - "(the empty string and potentially single-char strings)", - default=False), - BoolOption("withspecialisedtuple", "use specialised tuples", default=False), @@ -223,34 +214,14 @@ default=False, requires=[("objspace.honor__builtins__", False)]), - BoolOption("withmapdict", - "make instances really small but slow without the JIT", - default=False, - requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withtypeversion", True), - ]), - BoolOption("withliststrategies", "enable optimized ways to store lists of primitives ", default=True), - BoolOption("withtypeversion", - "version type objects when changing them", - cmdline=None, - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), - - BoolOption("withmethodcache", - "try to cache method lookups", - default=False, - requires=[("objspace.std.withtypeversion", True), - ("translation.rweakref", True)]), BoolOption("withmethodcachecounter", "try to cache methods and provide a counter in __pypy__. 
" "for testing purposes only.", - default=False, - requires=[("objspace.std.withmethodcache", True)]), + default=False), IntOption("methodcachesizeexp", " 2 ** methodcachesizeexp is the size of the of the method cache ", default=11), @@ -261,22 +232,10 @@ BoolOption("optimized_list_getitem", "special case the 'list[integer]' expressions", default=False), - BoolOption("getattributeshortcut", - "track types that override __getattribute__", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), BoolOption("newshortcut", "cache and shortcut calling __new__ from builtin types", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), + default=False), - BoolOption("withidentitydict", - "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), ]), ]) @@ -292,14 +251,10 @@ """ # all the good optimizations for PyPy should be listed here if level in ['2', '3', 'jit']: - config.objspace.std.suggest(withmethodcache=True) - config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(intshortcut=True) config.objspace.std.suggest(optimized_list_getitem=True) - config.objspace.std.suggest(getattributeshortcut=True) #config.objspace.std.suggest(newshortcut=True) config.objspace.std.suggest(withspecialisedtuple=True) - config.objspace.std.suggest(withidentitydict=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) @@ -312,15 +267,13 @@ # memory-saving optimizations if level == 'mem': config.objspace.std.suggest(withprebuiltint=True) - config.objspace.std.suggest(withprebuiltchar=True) - config.objspace.std.suggest(withmapdict=True) + config.objspace.std.suggest(withliststrategies=True) if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) # extra 
optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) - config.objspace.std.suggest(withmapdict=True) def enable_allworkingmodules(config): diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -11,12 +11,6 @@ assert conf.objspace.usemodules.gc - conf.objspace.std.withmapdict = True - assert conf.objspace.std.withtypeversion - conf = get_pypy_config() - conf.objspace.std.withtypeversion = False - py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") - def test_conflicting_gcrootfinder(): conf = get_pypy_config() conf.translation.gc = "boehm" @@ -47,18 +41,10 @@ def test_set_pypy_opt_level(): conf = get_pypy_config() set_pypy_opt_level(conf, '2') - assert conf.objspace.std.getattributeshortcut + assert conf.objspace.std.intshortcut conf = get_pypy_config() set_pypy_opt_level(conf, '0') - assert not conf.objspace.std.getattributeshortcut - -def test_rweakref_required(): - conf = get_pypy_config() - conf.translation.rweakref = False - set_pypy_opt_level(conf, '3') - - assert not conf.objspace.std.withtypeversion - assert not conf.objspace.std.withmethodcache + assert not conf.objspace.std.intshortcut def test_check_documentation(): def check_file_exists(fn): diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -102,15 +102,15 @@ apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ - tk-dev + tk-dev libgc-dev For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. 
On Fedora:: - yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ - lib-sqlite3-devel ncurses-devel expat-devel openssl-devel - (XXX plus the Febora version of libgdbm-dev and tk-dev) + dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ + lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \ + gdbm-devel For the optional lzma module on PyPy3 you will also need ``xz-devel``. diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.getattributeshortcut.txt +++ /dev/null @@ -1,1 +0,0 @@ -Performance only: track types that override __getattribute__. diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt --- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt +++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt @@ -1,1 +1,1 @@ -Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`. +Set the cache size (number of entries) for the method cache. diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withidentitydict.txt +++ /dev/null @@ -1,21 +0,0 @@ -============================= -objspace.std.withidentitydict -============================= - -* **name:** withidentitydict - -* **description:** enable a dictionary strategy for "by identity" comparisons - -* **command-line:** --objspace-std-withidentitydict - -* **command-line for negation:** --no-objspace-std-withidentitydict - -* **option type:** boolean option - -* **default:** True - - -Enable a dictionary strategy specialized for instances of classes which -compares "by identity", which is the default unless you override ``__hash__``, -``__eq__`` or ``__cmp__``. This strategy will be used only with new-style -classes. 
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmapdict.txt +++ /dev/null @@ -1,5 +0,0 @@ -Enable the new version of "sharing dictionaries". - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmethodcache.txt +++ /dev/null @@ -1,2 +0,0 @@ -Enable method caching. See the section "Method Caching" in `Standard -Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__. diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt --- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt +++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt @@ -1,1 +1,1 @@ -Testing/debug option for :config:`objspace.std.withmethodcache`. +Testing/debug option for the method cache. diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt deleted file mode 100644 diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withtypeversion.txt +++ /dev/null @@ -1,6 +0,0 @@ -This (mostly internal) option enables "type versions": Every type object gets an -(only internally visible) version that is updated when the type's dict is -changed. This is e.g. used for invalidating caches. It does not make sense to -enable this option alone. - -.. 
internal diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -387,6 +387,14 @@ wrappers. On PyPy we can't tell the difference, so ``ismethod([].__add__) == ismethod(list.__add__) == True``. +* in CPython, the built-in types have attributes that can be + implemented in various ways. Depending on the way, if you try to + write to (or delete) a read-only (or undeletable) attribute, you get + either a ``TypeError`` or an ``AttributeError``. PyPy tries to + strike some middle ground between full consistency and full + compatibility here. This means that a few corner cases don't raise + the same exception, like ``del (lambda:None).__closure__``. + * in pure Python, if you write ``class A(object): def f(self): pass`` and have a subclass ``B`` which doesn't override ``f()``, then ``B.f(x)`` still checks that ``x`` is an instance of ``B``. In diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -21,7 +21,7 @@ :source:`pypy/doc/discussion/` drafts of ideas and documentation -:source:`pypy/goal/` our :ref:`main PyPy-translation scripts ` +:source:`pypy/goal/` our main PyPy-translation scripts live here :source:`pypy/interpreter/` :doc:`bytecode interpreter ` and related objects diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -13,3 +13,4 @@ discussion/improve-rpython discussion/ctypes-implementation discussion/jit-profiler + discussion/rawrefcount diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -106,8 +106,12 @@ For information on which third party extensions work (or do not work) with PyPy see the `compatibility wiki`_. +For more information about how we manage refcounting semamtics see +rawrefcount_ + .. 
_compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home .. _cffi: http://cffi.readthedocs.org/ +.. _rawrefcount: discussion/rawrefcount.html On which platforms does PyPy run? diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -62,29 +62,37 @@ Dictionary Optimizations ~~~~~~~~~~~~~~~~~~~~~~~~ -Multi-Dicts -+++++++++++ +Dict Strategies +++++++++++++++++ -Multi-dicts are a special implementation of dictionaries. It became clear that -it is very useful to *change* the internal representation of an object during -its lifetime. Multi-dicts are a general way to do that for dictionaries: they -provide generic support for the switching of internal representations for -dicts. +Dict strategies are an implementation approach for dictionaries (and lists) +that make it possible to use a specialized representation of the dictionary's +data, while still being able to switch back to a general representation should +that become necessary later. -If you just enable multi-dicts, special representations for empty dictionaries, -for string-keyed dictionaries. In addition there are more specialized dictionary -implementations for various purposes (see below). +Dict strategies are always enabled, by default there are special strategies for +dicts with just string keys, just unicode keys and just integer keys. If one of +those specialized strategies is used, then dict lookup can use much faster +hashing and comparison for the dict keys. There is of course also a strategy +for general keys. -This is now the default implementation of dictionaries in the Python interpreter. +Identity Dicts ++++++++++++++++ -Sharing Dicts +We also have a strategy specialized for keys that are instances of classes +which compares "by identity", which is the default unless you override +``__hash__``, ``__eq__`` or ``__cmp__``. 
This strategy will be used only with +new-style classes. + + +Map Dicts +++++++++++++ -Sharing dictionaries are a special representation used together with multidicts. -This dict representation is used only for instance dictionaries and tries to -make instance dictionaries use less memory (in fact, in the ideal case the -memory behaviour should be mostly like that of using __slots__). +Map dictionaries are a special representation used together with dict strategies. +This dict strategy is used only for instance dictionaries and tries to +make instance dictionaries use less memory (in fact, usually memory behaviour +should be mostly like that of using ``__slots__``). The idea is the following: Most instances of the same class have very similar attributes, and are even adding these keys to the dictionary in the same order @@ -95,8 +103,6 @@ dicts: the representation of the instance dict contains only a list of values. -A more advanced version of sharing dicts, called *map dicts,* is available -with the :config:`objspace.std.withmapdict` option. User Class Optimizations @@ -114,8 +120,7 @@ base classes is changed). On subsequent lookups the cached version can be used, as long as the instance did not shadow any of its classes attributes. -You can enable this feature with the :config:`objspace.std.withmethodcache` -option. +This feature is enabled by default. Interpreter Optimizations diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst --- a/pypy/doc/introduction.rst +++ b/pypy/doc/introduction.rst @@ -1,16 +1,22 @@ What is PyPy? ============= -In common parlance, PyPy has been used to mean two things. The first is the -:ref:`RPython translation toolchain `, which is a framework for generating -dynamic programming language implementations. And the second is one -particular implementation that is so generated -- -an implementation of the Python_ programming language written in -Python itself. It is designed to be flexible and easy to experiment with. 
+Historically, PyPy has been used to mean two things. The first is the +:ref:`RPython translation toolchain ` for generating +interpreters for dynamic programming languages. And the second is one +particular implementation of Python_ produced with it. Because RPython +uses the same syntax as Python, this generated version became known as +Python interpreter written in Python. It is designed to be flexible and +easy to experiment with. -This double usage has proven to be confusing, and we are trying to move -away from using the word PyPy to mean both things. From now on we will -try to use PyPy to only mean the Python implementation, and say the +To make it more clear, we start with source code written in RPython, +apply the RPython translation toolchain, and end up with PyPy as a +binary executable. This executable is the Python interpreter. + +Double usage has proven to be confusing, so we've moved away from using +the word PyPy to mean both toolchain and generated interpreter. Now we +use word PyPy to refer to the Python implementation, and explicitly +mention :ref:`RPython translation toolchain ` when we mean the framework. Some older documents, presentations, papers and videos will still have the old diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst --- a/pypy/doc/release-5.1.0.rst +++ b/pypy/doc/release-5.1.0.rst @@ -3,10 +3,17 @@ ======== We have released PyPy 5.1, about a month after PyPy 5.0. -We encourage all users of PyPy to update to this version. Apart from the usual -bug fixes, there is an ongoing effort to improve the warmup time and memory -usage of JIT-related metadata, and we now fully support the IBM s390x -architecture. + +This release includes more improvement to warmup time and memory +requirements. We have seen about a 20% memory requirement reduction and up to +30% warmup time improvement, more detail in the `blog post`_. + +We also now have `fully support for the IBM s390x`_. 
Since this support is in +`RPython`_, any dynamic language written using RPython, like PyPy, will +automagically be supported on that architecture. + +We updated cffi_ to 1.6, and continue to improve support for the wider +python ecosystem using the PyPy interpreter. You can download the PyPy 5.1 release here: @@ -26,6 +33,9 @@ .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html .. _`numpy`: https://bitbucket.org/pypy/numpy +.. _cffi: https://cffi.readthedocs.org +.. _`fully support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html +.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html What is PyPy? ============= @@ -46,7 +56,7 @@ * big- and little-endian variants of **PPC64** running Linux, - * **s960x** running Linux + * **s390x** running Linux .. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. _`dynamic languages`: http://pypyjs.org @@ -74,6 +84,8 @@ * Fix a corner case in the JIT * Fix edge cases in the cpyext refcounting-compatible semantics + (more work on cpyext compatibility is coming in the ``cpyext-ext`` + branch, but isn't ready yet) * Try harder to not emit NEON instructions on ARM processors without NEON support @@ -92,11 +104,17 @@ * Fix sandbox startup (a regression in 5.0) + * Fix possible segfault for classes with mangled mro or __metaclass__ + + * Fix isinstance(deque(), Hashable) on the pure python deque + + * Fix an issue with forkpty() + * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy -* Numpy: +* Numpy_: * Implemented numpy.where for a single argument @@ -108,6 +126,8 @@ functions exported from libpypy.so are declared in pypy_numpy.h, which is included only when building our fork of numpy + * Add broadcast + * Performance improvements: * 
Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting @@ -119,14 +139,18 @@ * Remove the forced minor collection that occurs when rewriting the assembler at the start of the JIT backend + * Port the resource module to cffi + * Internal refactorings: * Use a simpler logger to speed up translation * Drop vestiges of Python 2.5 support in testing + * Update rpython functions with ones needed for py3k + .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html -.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html +.. _Numpy: https://bitbucket.org/pypy/numpy Please update, and continue to help us make PyPy better. diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst --- a/pypy/doc/whatsnew-5.1.0.rst +++ b/pypy/doc/whatsnew-5.1.0.rst @@ -60,3 +60,13 @@ Remove old uneeded numpy headers, what is left is only for testing. Also generate pypy_numpy.h which exposes functions to directly use micronumpy ndarray and ufuncs + +.. branch: rposix-for-3 + +Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). +This updates the underlying rpython functions with the ones needed for the +py3k branch + +.. branch: numpy_broadcast + +Add broadcast to micronumpy diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,14 +3,22 @@ ========================= .. this is a revision shortly after release-5.1 -.. startrev: 2180e1eaf6f6 +.. startrev: aa60332382a1 -.. branch: rposix-for-3 +.. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046 -Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat(). -This updates the underlying rpython functions with the ones needed for the -py3k branch - -.. branch: numpy_broadcast +.. branch: gcheader-decl -Add broadcast to micronumpy +Reduce the size of generated C sources. + + +.. 
branch: remove-objspace-options + +Remove a number of options from the build process that were never tested and +never set. Fix a performance bug in the method cache. + +.. branch: bitstring + +JIT: use bitstrings to compress the lists of read or written descrs +that we attach to EffectInfo. Fixes a problem we had in +remove-objspace-options. diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -138,6 +138,7 @@ e.write_unraisable(self.space, "new_code_hook()") def _initialize(self): + from pypy.objspace.std.mapdict import init_mapdict_cache if self.co_cellvars: argcount = self.co_argcount argcount += self.co_kwonlyargcount @@ -174,9 +175,7 @@ self._compute_flatcall() - if self.space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import init_mapdict_cache - init_mapdict_cache(self) + init_mapdict_cache(self) def _init_ready(self): "This is a hook for the vmprof module, which overrides this method." 
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -157,7 +157,6 @@ ec.bytecode_trace(self) next_instr = r_uint(self.last_instr) opcode = ord(co_code[next_instr]) - #print 'executing', self.last_instr, bytecode_spec.method_names[opcode] next_instr += 1 if opcode >= HAVE_ARGUMENT: @@ -905,8 +904,7 @@ def LOAD_ATTR(self, nameindex, next_instr): "obj.attributename" w_obj = self.popvalue() - if (self.space.config.objspace.std.withmapdict - and not jit.we_are_jitted()): + if not jit.we_are_jitted(): from pypy.objspace.std.mapdict import LOAD_ATTR_caching w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex) else: @@ -1537,7 +1535,6 @@ return r_uint(self.handlerposition) # jump to the handler - class WithBlock(FinallyBlock): _immutable_ = True diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -98,175 +98,51 @@ # reason is that it is missing a place to store the __dict__, the slots, # the weakref lifeline, and it typically has no interp-level __del__. # So we create a few interp-level subclasses of W_XxxObject, which add -# some combination of features. -# -# We don't build 2**4 == 16 subclasses for all combinations of requested -# features, but limit ourselves to 6, chosen a bit arbitrarily based on -# typical usage (case 1 is the most common kind of app-level subclasses; -# case 2 is the memory-saving kind defined with __slots__). -# -# +----------------------------------------------------------------+ -# | NOTE: if withmapdict is enabled, the following doesn't apply! | -# | Map dicts can flexibly allow any slots/__dict__/__weakref__ to | -# | show up only when needed. In particular there is no way with | -# | mapdict to prevent some objects from being weakrefable. | -# +----------------------------------------------------------------+ -# -# dict slots del weakrefable -# -# 1. 
Y N N Y UserDictWeakref -# 2. N Y N N UserSlots -# 3. Y Y N Y UserDictWeakrefSlots -# 4. N Y N Y UserSlotsWeakref -# 5. Y Y Y Y UserDictWeakrefSlotsDel -# 6. N Y Y Y UserSlotsWeakrefDel -# -# Note that if the app-level explicitly requests no dict, we should not -# provide one, otherwise storing random attributes on the app-level -# instance would unexpectedly work. We don't care too much, though, if -# an object is weakrefable when it shouldn't really be. It's important -# that it has a __del__ only if absolutely needed, as this kills the -# performance of the GCs. -# -# Interp-level inheritance is like this: -# -# W_XxxObject base -# / \ -# 1 2 -# / \ -# 3 4 -# / \ -# 5 6 +# some combination of features. This is done using mapdict. -def get_unique_interplevel_subclass(config, cls, hasdict, wants_slots, - needsdel=False, weakrefable=False): +# we need two subclasses of the app-level type, one to add mapdict, and then one +# to add del to not slow down the GC. + +def get_unique_interplevel_subclass(config, cls, needsdel=False): "NOT_RPYTHON: initialization-time only" if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False): needsdel = False assert cls.typedef.acceptable_as_base_class - key = config, cls, hasdict, wants_slots, needsdel, weakrefable + key = config, cls, needsdel try: return _subclass_cache[key] except KeyError: - subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel, - weakrefable) + # XXX can save a class if cls already has a __del__ + if needsdel: + cls = get_unique_interplevel_subclass(config, cls, False) + subcls = _getusercls(config, cls, needsdel) assert key not in _subclass_cache _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable): +def _getusercls(config, cls, wants_del, reallywantdict=False): + from rpython.rlib import objectmodel + from 
pypy.objspace.std.mapdict import (BaseUserClassMapdict, + MapdictDictSupport, MapdictWeakrefSupport, + _make_storage_mixin_size_n) typedef = cls.typedef - if wants_dict and typedef.hasdict: - wants_dict = False - if config.objspace.std.withmapdict and not typedef.hasdict: - # mapdict only works if the type does not already have a dict - if wants_del: - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - return _usersubclswithfeature(config, cls, "user", "dict", "weakref", "slots") - # Forest of if's - see the comment above. + name = cls.__name__ + "User" + + mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()] + if reallywantdict or not typedef.hasdict: + # the type has no dict, mapdict to provide the dict + mixins_needed.append(MapdictDictSupport) + name += "Dict" + if not typedef.weakrefable: + # the type does not support weakrefs yet, mapdict to provide weakref + # support + mixins_needed.append(MapdictWeakrefSupport) + name += "Weakrefable" if wants_del: - if wants_dict: - # case 5. Parent class is 3. - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - else: - # case 6. Parent class is 4. - parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - elif wants_dict: - if wants_slots: - # case 3. Parent class is 1. - parentcls = get_unique_interplevel_subclass(config, cls, True, False, - False, True) - return _usersubclswithfeature(config, parentcls, "slots") - else: - # case 1 (we need to add weakrefable unless it's already in 'cls') - if not typedef.weakrefable: - return _usersubclswithfeature(config, cls, "user", "dict", "weakref") - else: - return _usersubclswithfeature(config, cls, "user", "dict") - else: - if weakrefable and not typedef.weakrefable: - # case 4. Parent class is 2. 
- parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, False) - return _usersubclswithfeature(config, parentcls, "weakref") - else: - # case 2 (if the base is already weakrefable, case 2 == case 4) - return _usersubclswithfeature(config, cls, "user", "slots") - -def _usersubclswithfeature(config, parentcls, *features): - key = config, parentcls, features - try: - return _usersubclswithfeature_cache[key] - except KeyError: - subcls = _builduserclswithfeature(config, parentcls, *features) - _usersubclswithfeature_cache[key] = subcls - return subcls -_usersubclswithfeature_cache = {} -_allusersubcls_cache = {} - -def _builduserclswithfeature(config, supercls, *features): - "NOT_RPYTHON: initialization-time only" - name = supercls.__name__ - name += ''.join([name.capitalize() for name in features]) - body = {} - #print '..........', name, '(', supercls.__name__, ')' - - def add(Proto): - for key, value in Proto.__dict__.items(): - if (not key.startswith('__') and not key.startswith('_mixin_') - or key == '__del__'): - if hasattr(value, "func_name"): - value = func_with_new_name(value, value.func_name) - body[key] = value - - if (config.objspace.std.withmapdict and "dict" in features): - from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin - add(BaseMapdictObject) - add(ObjectMixin) - body["user_overridden_class"] = True - features = () - - if "user" in features: # generic feature needed by all subcls - - class Proto(object): - user_overridden_class = True - - def getclass(self, space): - return promote(self.w__class__) - - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - - def user_setup(self, space, w_subtype): - self.space = space - self.w__class__ = w_subtype - self.user_setup_slots(w_subtype.layout.nslots) - - def user_setup_slots(self, nslots): - assert nslots == 0 - add(Proto) - - if "weakref" in features: - class Proto(object): - _lifeline_ = None - def 
getweakref(self): - return self._lifeline_ - def setweakref(self, space, weakreflifeline): - self._lifeline_ = weakreflifeline - def delweakref(self): - self._lifeline_ = None - add(Proto) - - if "del" in features: - parent_destructor = getattr(supercls, '__del__', None) + name += "Del" + parent_destructor = getattr(cls, '__del__', None) def call_parent_del(self): assert isinstance(self, subcls) parent_destructor(self) @@ -281,57 +157,16 @@ if parent_destructor is not None: self.enqueue_for_destruction(self.space, call_parent_del, 'internal destructor of ') - add(Proto) + mixins_needed.append(Proto) - if "slots" in features: - class Proto(object): - slots_w = [] - def user_setup_slots(self, nslots): - if nslots > 0: - self.slots_w = [None] * nslots - def setslotvalue(self, index, w_value): - self.slots_w[index] = w_value - def delslotvalue(self, index): - if self.slots_w[index] is None: - return False - self.slots_w[index] = None - return True - def getslotvalue(self, index): - return self.slots_w[index] - add(Proto) - - if "dict" in features: - base_user_setup = supercls.user_setup.im_func - if "user_setup" in body: - base_user_setup = body["user_setup"] - class Proto(object): - def getdict(self, space): - return self.w__dict__ - - def setdict(self, space, w_dict): - self.w__dict__ = check_new_dictionary(space, w_dict) - - def user_setup(self, space, w_subtype): - self.w__dict__ = space.newdict( - instance=True) - base_user_setup(self, space, w_subtype) - - add(Proto) - - subcls = type(name, (supercls,), body) - _allusersubcls_cache[subcls] = True + class subcls(cls): + user_overridden_class = True + for base in mixins_needed: + objectmodel.import_from_mixin(base) + del subcls.base + subcls.__name__ = name return subcls -# a couple of helpers for the Proto classes above, factored out to reduce -# the translated code size -def check_new_dictionary(space, w_dict): - if not space.isinstance_w(w_dict, space.w_dict): - raise OperationError(space.w_TypeError, - 
space.wrap("setting dictionary to a non-dict")) - from pypy.objspace.std import dictmultiobject - assert isinstance(w_dict, dictmultiobject.W_DictMultiObject) - return w_dict -check_new_dictionary._dont_inline_ = True # ____________________________________________________________ diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -865,6 +865,3 @@ a.__eq__ = 42 assert a.__eq__ == 42 - -class AppTestGetattrWithGetAttributeShortcut(AppTestGetattr): - spaceconfig = {"objspace.std.getattributeshortcut": True} diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -108,9 +108,8 @@ 'interp_magic.method_cache_counter') self.extra_interpdef('reset_method_cache_counter', 'interp_magic.reset_method_cache_counter') - if self.space.config.objspace.std.withmapdict: - self.extra_interpdef('mapdict_cache_counter', - 'interp_magic.mapdict_cache_counter') + self.extra_interpdef('mapdict_cache_counter', + 'interp_magic.mapdict_cache_counter') PYC_MAGIC = get_pyc_magic(self.space) self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC) try: diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -37,17 +37,15 @@ cache = space.fromcache(MethodCache) cache.misses = {} cache.hits = {} - if space.config.objspace.std.withmapdict: - cache = space.fromcache(MapAttrCache) - cache.misses = {} - cache.hits = {} + cache = space.fromcache(MapAttrCache) + cache.misses = {} + cache.hits = {} @unwrap_spec(name=str) def mapdict_cache_counter(space, name): """Return a tuple (index_cache_hits, index_cache_misses) for lookups in the mapdict cache with the given attribute name.""" assert 
space.config.objspace.std.withmethodcachecounter - assert space.config.objspace.std.withmapdict cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -46,6 +46,7 @@ '_get_types': 'func._get_types', '_get_common_types': 'func._get_common_types', 'from_buffer': 'func.from_buffer', + 'gcp': 'func.gcp', 'string': 'func.string', 'unpack': 'func.unpack', diff --git a/pypy/module/_cffi_backend/lib_obj.py b/pypy/module/_cffi_backend/lib_obj.py --- a/pypy/module/_cffi_backend/lib_obj.py +++ b/pypy/module/_cffi_backend/lib_obj.py @@ -64,7 +64,8 @@ # ptr = rffi.cast(rffi.CCHARP, g.c_address) assert ptr - return W_FunctionWrapper(self.space, ptr, g.c_size_or_direct_fn, + return W_FunctionWrapper(self.space, self.ffi, + ptr, g.c_size_or_direct_fn, rawfunctype, fnname, self.libname) @jit.elidable_promote() diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -238,7 +238,7 @@ self.nostruct_nargs = len(ctfuncptr.fargs) - (locs is not None and locs[0] == 'R') - def unexpected_fn_type(self, ffi): + def repr_fn_type(self, ffi, repl=""): fargs, fret, ellipsis, abi = self._unpack(ffi) argnames = [farg.name for farg in fargs] if ellipsis: @@ -246,9 +246,14 @@ sargs = ', '.join(argnames) sret1 = fret.name[:fret.name_position] sret2 = fret.name[fret.name_position:] + if len(repl) > 0 and not sret1.endswith('*'): + repl = " " + repl + return '%s%s(%s)%s' % (sret1, repl, sargs, sret2) + + def unexpected_fn_type(self, ffi): raise oefmt(ffi.w_FFIError, - "the type '%s(%s)%s' is a function type, not a " - "pointer-to-function type", sret1, sargs, sret2) + "the type '%s' is a 
function type, not a " + "pointer-to-function type", self.repr_fn_type(ffi)) def realize_c_type(ffi, opcodes, index): diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -420,9 +420,11 @@ def test_math_sin_type(self): ffi, lib = self.prepare( - "double sin(double);", + "double sin(double); void *xxtestfunc();", 'test_math_sin_type', - '#include ') + """#include + void *xxtestfunc(void) { return 0; } + """) # 'lib.sin' is typed as a object on lib assert ffi.typeof(lib.sin).cname == "double(*)(double)" # 'x' is another object on lib, made very indirectly @@ -432,7 +434,16 @@ # present on built-in functions on CPython; must be emulated on PyPy: assert lib.sin.__name__ == 'sin' assert lib.sin.__module__ == '_CFFI_test_math_sin_type' - assert lib.sin.__doc__=='direct call to the C function of the same name' + assert lib.sin.__doc__ == ( + "double sin(double);\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") + + assert ffi.typeof(lib.xxtestfunc).cname == "void *(*)()" + assert lib.xxtestfunc.__doc__ == ( + "void *xxtestfunc();\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") def test_verify_anonymous_struct_with_typedef(self): ffi, lib = self.prepare( @@ -1762,14 +1773,14 @@ def test_introspect_order(self): ffi, lib = self.prepare(""" - union aaa { int a; }; typedef struct ccc { int a; } b; - union g { int a; }; typedef struct cc { int a; } bbb; - union aa { int a; }; typedef struct a { int a; } bb; + union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb; + union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb; + union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb; """, "test_introspect_order", """ - union aaa { int a; }; typedef struct ccc { int a; } b; - union g { int a; }; typedef struct cc { int a; } bbb; - union aa { int a; 
}; typedef struct a { int a; } bb; + union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb; + union CFFIg { int a; }; typedef struct CFFIcc { int a; } CFFIbbb; + union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb; """) - assert ffi.list_types() == (['b', 'bb', 'bbb'], - ['a', 'cc', 'ccc'], - ['aa', 'aaa', 'g']) + assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'], + ['CFFIa', 'CFFIcc', 'CFFIccc'], + ['CFFIaa', 'CFFIaaa', 'CFFIg']) diff --git a/pypy/module/_cffi_backend/wrapper.py b/pypy/module/_cffi_backend/wrapper.py --- a/pypy/module/_cffi_backend/wrapper.py +++ b/pypy/module/_cffi_backend/wrapper.py @@ -1,6 +1,7 @@ from pypy.interpreter.error import oefmt from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, interp_attrproperty +from pypy.interpreter.typedef import GetSetProperty from pypy.interpreter.gateway import interp2app from rpython.rlib import jit @@ -24,9 +25,8 @@ This class cannot be used for variadic functions. 
""" _immutable_ = True - common_doc_str = 'direct call to the C function of the same name' - def __init__(self, space, fnptr, directfnptr, + def __init__(self, space, ffi, fnptr, directfnptr, rawfunctype, fnname, modulename): # everything related to the type of the function is accessed # as immutable attributes of the 'rawfunctype' object, which @@ -39,6 +39,7 @@ assert locs is None or len(ctype.fargs) == len(locs) # self.space = space + self.ffi = ffi self.fnptr = fnptr self.directfnptr = directfnptr self.rawfunctype = rawfunctype @@ -91,7 +92,13 @@ return ctype._call(self.fnptr, args_w) def descr_repr(self, space): - return space.wrap("" % (self.fnname,)) + doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname) + return space.wrap("" % (doc,)) + + def descr_get_doc(self, space): + doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname) + doc = '%s;\n\nCFFI C function from %s.lib' % (doc, self.modulename) + return space.wrap(doc) @jit.unroll_safe @@ -128,6 +135,6 @@ __call__ = interp2app(W_FunctionWrapper.descr_call), __name__ = interp_attrproperty('fnname', cls=W_FunctionWrapper), __module__ = interp_attrproperty('modulename', cls=W_FunctionWrapper), - __doc__ = interp_attrproperty('common_doc_str', cls=W_FunctionWrapper), + __doc__ = GetSetProperty(W_FunctionWrapper.descr_get_doc), ) W_FunctionWrapper.typedef.acceptable_as_base_class = False diff --git a/pypy/module/cpyext/include/listobject.h b/pypy/module/cpyext/include/listobject.h --- a/pypy/module/cpyext/include/listobject.h +++ b/pypy/module/cpyext/include/listobject.h @@ -1,2 +1,1 @@ #define PyList_GET_ITEM PyList_GetItem -#define PyList_SET_ITEM PyList_SetItem diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -3,7 +3,7 @@ from pypy.module.cpyext.api import (cpython_api, CANNOT_FAIL, Py_ssize_t, build_type_checkers) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall -from 
pypy.module.cpyext.pyobject import Py_DecRef, PyObject +from pypy.module.cpyext.pyobject import Py_DecRef, PyObject, make_ref from pypy.objspace.std.listobject import W_ListObject from pypy.interpreter.error import OperationError @@ -21,6 +21,25 @@ """ return space.newlist([None] * len) + at cpython_api([PyObject, Py_ssize_t, PyObject], PyObject, error=CANNOT_FAIL, + result_borrowed=True) +def PyList_SET_ITEM(space, w_list, index, w_item): + """Macro form of PyList_SetItem() without error checking. This is normally + only used to fill in new lists where there is no previous content. + + This function "steals" a reference to item, and, unlike PyList_SetItem(), + does not discard a reference to any item that it being replaced; any + reference in list at position i will be leaked. + """ + assert isinstance(w_list, W_ListObject) + assert 0 <= index < w_list.length() + # Deliberately leak, so that it can be safely decref'd. + make_ref(space, w_list.getitem(index)) + Py_DecRef(space, w_item) + w_list.setitem(index, w_item) + return w_item + + @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) def PyList_SetItem(space, w_list, index, w_item): """Set the item at index index in list to item. Return 0 on success diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -52,6 +52,9 @@ @cpython_api([PyObject], lltype.Void) def PyObject_dealloc(space, obj): + # This frees an object after its refcount dropped to zero, so we + # assert that it is really zero here. 
+ assert obj.c_ob_refcnt == 0 pto = obj.c_ob_type obj_voidp = rffi.cast(rffi.VOIDP, obj) generic_cpy_call(space, pto.c_tp_free, obj_voidp) diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -52,7 +52,8 @@ def PyEval_ThreadsInitialized(space): if not space.config.translation.thread: return 0 - return 1 + from pypy.module.thread import os_thread + return int(os_thread.threads_initialized(space)) # XXX: might be generally useful def encapsulator(T, flavor='raw', dealloc=None): diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -106,7 +106,6 @@ """Base class for all cpyext tests.""" spaceconfig = dict(usemodules=['cpyext', 'thread', '_rawffi', 'array', 'itertools', 'time', 'binascii', 'micronumpy']) - spaceconfig['std.withmethodcache'] = True enable_leak_checking = True diff --git a/pypy/module/cpyext/test/test_listobject.py b/pypy/module/cpyext/test/test_listobject.py --- a/pypy/module/cpyext/test/test_listobject.py +++ b/pypy/module/cpyext/test/test_listobject.py @@ -136,3 +136,45 @@ l = [1, 2, 3] module.setlistitem(l,0) assert l == [None, 2, 3] + + def test_get_item_macro(self): + module = self.import_extension('foo', [ + ("test_get_item", "METH_NOARGS", + """ + PyObject* o = PyList_New(1); + + PyObject* o2 = PyInt_FromLong(0); + PyList_SET_ITEM(o, 0, o2); + o2 = NULL; + + PyObject* o3 = PyList_GET_ITEM(o, 0); + Py_INCREF(o3); + Py_CLEAR(o); + return o3; + """)]) + assert module.test_get_item() == 0 + + def test_set_item_macro(self): + """PyList_SET_ITEM leaks a reference to the target.""" + module = self.import_extension('foo', [ + ("test_refcount_diff_after_setitem", "METH_NOARGS", + """ + PyObject* o = PyList_New(0); + PyObject* o2 = PyList_New(0); + + PyList_Append(o, o2); // does not steal o2 + + Py_ssize_t refcount = 
Py_REFCNT(o2); + + // Steal a reference to o2, but leak the old reference to o2. + // The net result should be no change in refcount. + PyList_SET_ITEM(o, 0, o2); + + Py_ssize_t new_refcount = Py_REFCNT(o2); + + Py_CLEAR(o); + Py_DECREF(o2); // append incref'd. + // Py_CLEAR(o2); // naive implementation would fail here. + return PyLong_FromSsize_t(new_refcount - refcount); + """)]) + assert module.test_refcount_diff_after_setitem() == 0 diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -104,7 +104,19 @@ return PyLong_FromLong(3); """), ]) + res = module.bounce() + assert res == 3 + def test_threadsinitialized(self): + module = self.import_extension('foo', [ + ("test", "METH_NOARGS", + """ + return PyInt_FromLong(PyEval_ThreadsInitialized()); + """), + ]) + res = module.test() + print "got", res + assert res in (0, 1) class TestInterpreterState(BaseApiTest): diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -6,15 +6,14 @@ @unwrap_spec(generation=int) def collect(space, generation=0): "Run a full collection. The optional argument is ignored." - # First clear the method cache. See test_gc for an example of why. - if space.config.objspace.std.withmethodcache: - from pypy.objspace.std.typeobject import MethodCache - cache = space.fromcache(MethodCache) - cache.clear() - if space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import MapAttrCache - cache = space.fromcache(MapAttrCache) - cache.clear() + # First clear the method and the map cache. + # See test_gc for an example of why. 
+ from pypy.objspace.std.typeobject import MethodCache + from pypy.objspace.std.mapdict import MapAttrCache + cache = space.fromcache(MethodCache) + cache.clear() + cache = space.fromcache(MapAttrCache) + cache.clear() rgc.collect() return space.wrap(0) diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py --- a/pypy/module/gc/test/test_gc.py +++ b/pypy/module/gc/test/test_gc.py @@ -106,7 +106,6 @@ class AppTestGcMethodCache(object): - spaceconfig = {"objspace.std.withmethodcache": True} def test_clear_method_cache(self): import gc, weakref @@ -127,10 +126,6 @@ assert r() is None -class AppTestGcMapDictIndexCache(AppTestGcMethodCache): - spaceconfig = {"objspace.std.withmethodcache": True, - "objspace.std.withmapdict": True} - def test_clear_index_cache(self): import gc, weakref rlist = [] diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -25,9 +25,9 @@ i61 = int_add(i58, 1) setfield_gc(p18, i61, descr=) guard_not_invalidated(descr=...) - p65 = getfield_gc_r(p14, descr=) + p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) - p66 = getfield_gc_r(p14, descr=) + p66 = getfield_gc_r(p14, descr=) guard_nonnull_class(p66, ..., descr=...) 
p67 = force_token() setfield_gc(p0, p67, descr=) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -417,8 +417,11 @@ def test_math_sin_type(): ffi = FFI() - ffi.cdef("double sin(double);") - lib = verify(ffi, 'test_math_sin_type', '#include ') + ffi.cdef("double sin(double); void *xxtestfunc();") + lib = verify(ffi, 'test_math_sin_type', """ + #include + void *xxtestfunc(void) { return 0; } + """) # 'lib.sin' is typed as a object on lib assert ffi.typeof(lib.sin).cname == "double(*)(double)" # 'x' is another object on lib, made very indirectly @@ -428,7 +431,16 @@ # present on built-in functions on CPython; must be emulated on PyPy: assert lib.sin.__name__ == 'sin' assert lib.sin.__module__ == '_CFFI_test_math_sin_type' - assert lib.sin.__doc__ == 'direct call to the C function of the same name' + assert lib.sin.__doc__ == ( + "double sin(double);\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") + + assert ffi.typeof(lib.xxtestfunc).cname == "void *(*)()" + assert lib.xxtestfunc.__doc__ == ( + "void *xxtestfunc();\n" + "\n" + "CFFI C function from _CFFI_test_math_sin_type.lib") def test_verify_anonymous_struct_with_typedef(): ffi = FFI() diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py --- a/pypy/module/thread/gil.py +++ b/pypy/module/thread/gil.py @@ -34,6 +34,9 @@ result = False # already set up return result + def threads_initialized(self): + return self.gil_ready + ## def reinit_threads(self, space): ## "Called in the child process after a fork()" ## OSThreadLocals.reinit_threads(self, space) diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -148,6 +148,9 @@ space.threadlocals.setup_threads(space) 
bootstrapper.setup(space) +def threads_initialized(space): + return space.threadlocals.threads_initialized() + def reinit_threads(space): "Called in the child process after a fork()" diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -1,5 +1,7 @@ import time from pypy.module.thread import gil +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.lltypesystem import lltype from rpython.rlib import rgil from rpython.rlib.test import test_rthread from rpython.rlib import rthread as thread @@ -81,10 +83,13 @@ while len(state.data) < 2*N: debug_print(len(state.data)) if not still_waiting: + llop.debug_print(lltype.Void, "timeout. progress: " + "%d of 2*N (= %f%%)" % \ + (len(state.data), 2*N, 100*len(state.data)/(2.0*N))) raise ValueError("time out") still_waiting -= 1 if not we_are_translated(): rgil.release() - time.sleep(0.01) + time.sleep(0.1) if not we_are_translated(): rgil.acquire() debug_print("leaving!") i1 = i2 = 0 diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -121,6 +121,8 @@ 'set', 'frozenset', 'bytearray', 'memoryview'] class FakeObjSpace(ObjSpace): + is_fake_objspace = True + def __init__(self, config=None): self._seen_extras = [] ObjSpace.__init__(self, config=config) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -640,34 +640,12 @@ return [ord(s) for s in value] W_BytesObject.EMPTY = W_BytesObject('') -W_BytesObject.PREBUILT = [W_BytesObject(chr(i)) for i in range(256)] -del i def wrapstr(space, s): - if space.config.objspace.std.sharesmallstr: - if space.config.objspace.std.withprebuiltchar: - # share characters and empty string - if len(s) <= 1: - if len(s) == 0: - return 
W_BytesObject.EMPTY - else: - s = s[0] # annotator hint: a single char - return wrapchar(space, s) - else: - # only share the empty string - if len(s) == 0: - return W_BytesObject.EMPTY return W_BytesObject(s) -def wrapchar(space, c): - if space.config.objspace.std.withprebuiltchar and not we_are_jitted(): - return W_BytesObject.PREBUILT[ord(c)] - else: - return W_BytesObject(c) - - def getbytevalue(space, w_value): value = space.getindex_w(w_value, None) if not 0 <= value < 256: diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -23,6 +23,7 @@ def LOOKUP_METHOD(f, nameindex, *ignored): + from pypy.objspace.std.typeobject import MutableCell # stack before after # -------------- --fast-method----fallback-case------------ # @@ -33,7 +34,7 @@ space = f.space w_obj = f.popvalue() - if space.config.objspace.std.withmapdict and not jit.we_are_jitted(): + if not jit.we_are_jitted(): # mapdict has an extra-fast version of this function if LOOKUP_METHOD_mapdict(f, nameindex, w_obj): return @@ -44,7 +45,18 @@ w_type = space.type(w_obj) if w_type.has_object_getattribute(): name = space.str_w(w_name) - w_descr = w_type.lookup(name) + # bit of a mess to use these internal functions, but it allows the + # mapdict caching below to work without an additional lookup + version_tag = w_type.version_tag() + if version_tag is None: + _, w_descr = w_type._lookup_where(name) + w_descr_cell = None + else: + _, w_descr_cell = w_type._pure_lookup_where_with_method_cache( + name, version_tag) + w_descr = w_descr_cell + if isinstance(w_descr, MutableCell): + w_descr = w_descr.unwrap_cell(space) if w_descr is None: # this handles directly the common case # module.function(args..) 
@@ -59,11 +71,11 @@ # nothing in the instance f.pushvalue(w_descr) f.pushvalue(w_obj) - if (space.config.objspace.std.withmapdict and - not jit.we_are_jitted()): + if not jit.we_are_jitted(): # let mapdict cache stuff LOOKUP_METHOD_mapdict_fill_cache_method( - space, f.getcode(), name, nameindex, w_obj, w_type) + space, f.getcode(), name, nameindex, w_obj, w_type, + w_descr_cell) return if w_value is None: w_value = space.getattr(w_obj, w_name) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -66,10 +66,10 @@ w_obj = space.allocate_instance(W_ModuleDictObject, space.w_dict) W_ModuleDictObject.__init__(w_obj, space, strategy, storage) return w_obj - elif space.config.objspace.std.withmapdict and instance: + elif instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) - elif instance or strdict or module: + elif strdict or module: assert w_type is None strategy = space.fromcache(UnicodeDictStrategy) elif kwargs: @@ -528,7 +528,6 @@ def switch_to_correct_strategy(self, w_dict, w_key): from pypy.objspace.std.intobject import W_IntObject - withidentitydict = self.space.config.objspace.std.withidentitydict if type(w_key) is self.space.StringObjectCls: self.switch_to_bytes_strategy(w_dict) return @@ -539,7 +538,7 @@ self.switch_to_int_strategy(w_dict) return w_type = self.space.type(w_key) - if withidentitydict and w_type.compares_by_identity(): + if w_type.compares_by_identity(): self.switch_to_identity_strategy(w_dict) else: self.switch_to_object_strategy(w_dict) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -67,12 +67,7 @@ @jit.elidable def find_map_attr(self, name, index): - if (self.space.config.objspace.std.withmethodcache): - return self._find_map_attr_cache(name, index) - return 
self._find_map_attr(name, index) - - @jit.dont_look_inside - def _find_map_attr_cache(self, name, index): + # attr cache space = self.space cache = space.fromcache(MapAttrCache) SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp @@ -429,7 +424,6 @@ class MapAttrCache(object): def __init__(self, space): - assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.attrs = [None] * SIZE self.names = [None] * SIZE @@ -456,12 +450,19 @@ INVALID = 2 SLOTS_STARTING_FROM = 3 +# a little bit of a mess of mixin classes that implement various pieces of +# objspace user object functionality in terms of mapdict -class BaseMapdictObject: - _mixin_ = True +class BaseUserClassMapdict: + # everything that's needed to use mapdict for a user subclass at all. + # This immediately makes slots possible. - def _init_empty(self, map): - raise NotImplementedError("abstract base class") + # assumes presence of _init_empty, _mapdict_read_storage, + # _mapdict_write_storage, _mapdict_storage_length, + # _set_mapdict_storage_and_map + + # _____________________________________________ + # methods needed for mapdict def _become(self, new_obj): self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) @@ -470,49 +471,11 @@ return jit.promote(self.map) def _set_mapdict_map(self, map): self.map = map + # _____________________________________________ # objspace interface - def getdictvalue(self, space, attrname): - return self._get_mapdict_map().read(self, attrname, DICT) - - def setdictvalue(self, space, attrname, w_value): - return self._get_mapdict_map().write(self, attrname, DICT, w_value) - - def deldictvalue(self, space, attrname): - new_obj = self._get_mapdict_map().delete(self, attrname, DICT) - if new_obj is None: - return False - self._become(new_obj) - return True - - def getdict(self, space): - w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) - if w_dict is not None: - assert isinstance(w_dict, 
W_DictMultiObject) - return w_dict - - strategy = space.fromcache(MapDictStrategy) - storage = strategy.erase(self) - w_dict = W_DictObject(space, strategy, storage) - flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) - assert flag - return w_dict - - def setdict(self, space, w_dict): - from pypy.interpreter.typedef import check_new_dictionary - w_dict = check_new_dictionary(space, w_dict) - w_olddict = self.getdict(space) - assert isinstance(w_dict, W_DictMultiObject) - # The old dict has got 'self' as dstorage, but we are about to - # change self's ("dict", SPECIAL) attribute to point to the - # new dict. If the old dict was using the MapDictStrategy, we - # have to force it now: otherwise it would remain an empty - # shell that continues to delegate to 'self'. - if type(w_olddict.get_strategy()) is MapDictStrategy: - w_olddict.get_strategy().switch_to_object_strategy(w_olddict) - flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) - assert flag + # class access def getclass(self, space): return self._get_mapdict_map().terminator.w_cls @@ -523,9 +486,13 @@ def user_setup(self, space, w_subtype): self.space = space - assert not self.typedef.hasdict + assert (not self.typedef.hasdict or + isinstance(w_subtype.terminator, NoDictTerminator)) self._init_empty(w_subtype.terminator) + + # methods needed for slots + def getslotvalue(self, slotindex): index = SLOTS_STARTING_FROM + slotindex return self._get_mapdict_map().read(self, "slot", index) @@ -542,7 +509,9 @@ self._become(new_obj) return True - # used by _weakref implemenation + +class MapdictWeakrefSupport(object): + # stuff used by the _weakref implementation def getweakref(self): from pypy.module._weakref.interp__weakref import WeakrefLifeline @@ -563,8 +532,71 @@ self._get_mapdict_map().write(self, "weakref", SPECIAL, None) delweakref._cannot_really_call_random_things_ = True -class ObjectMixin(object): - _mixin_ = True + +class MapdictDictSupport(object): + + # objspace 
interface for dictionary operations + + def getdictvalue(self, space, attrname): + return self._get_mapdict_map().read(self, attrname, DICT) + + def setdictvalue(self, space, attrname, w_value): + return self._get_mapdict_map().write(self, attrname, DICT, w_value) + + def deldictvalue(self, space, attrname): + new_obj = self._get_mapdict_map().delete(self, attrname, DICT) + if new_obj is None: + return False + self._become(new_obj) + return True + + def getdict(self, space): + return _obj_getdict(self, space) + + def setdict(self, space, w_dict): + _obj_setdict(self, space, w_dict) + +# a couple of helpers for the classes above, factored out to reduce +# the translated code size + + at objectmodel.dont_inline +def _obj_getdict(self, space): + terminator = self._get_mapdict_map().terminator + assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator) + w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) + if w_dict is not None: + assert isinstance(w_dict, W_DictMultiObject) + return w_dict + + strategy = space.fromcache(MapDictStrategy) + storage = strategy.erase(self) + w_dict = W_DictObject(space, strategy, storage) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) + assert flag + return w_dict + + at objectmodel.dont_inline +def _obj_setdict(self, space, w_dict): + from pypy.interpreter.error import OperationError + terminator = self._get_mapdict_map().terminator + assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator) + if not space.isinstance_w(w_dict, space.w_dict): + raise OperationError(space.w_TypeError, + space.wrap("setting dictionary to a non-dict")) + assert isinstance(w_dict, W_DictMultiObject) + w_olddict = self.getdict(space) + assert isinstance(w_olddict, W_DictMultiObject) + # The old dict has got 'self' as dstorage, but we are about to + # change self's ("dict", SPECIAL) attribute to point to the + # new dict. 
If the old dict was using the MapDictStrategy, we + # have to force it now: otherwise it would remain an empty + # shell that continues to delegate to 'self'. + if type(w_olddict.get_strategy()) is MapDictStrategy: + w_olddict.get_strategy().switch_to_object_strategy(w_olddict) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) + assert flag + +class MapdictStorageMixin(object): def _init_empty(self, map): from rpython.rlib.debug import make_sure_not_resized self.map = map @@ -583,51 +615,32 @@ self.storage = storage self.map = map -class Object(ObjectMixin, BaseMapdictObject, W_Root): - pass # mainly for tests +class ObjectWithoutDict(W_Root): + # mainly for tests + objectmodel.import_from_mixin(MapdictStorageMixin) -def get_subclass_of_correct_size(space, cls, w_type): - assert space.config.objspace.std.withmapdict - map = w_type.terminator - classes = memo_get_subclass_of_correct_size(space, cls) - if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS: - return classes[0] - size = map.size_estimate() - debug.check_nonneg(size) - if size < len(classes): - return classes[size] - else: - return classes[len(classes)-1] -get_subclass_of_correct_size._annspecialcase_ = "specialize:arg(1)" + objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictWeakrefSupport) -SUBCLASSES_MIN_FIELDS = 5 # XXX tweak these numbers -SUBCLASSES_MAX_FIELDS = 5 -def memo_get_subclass_of_correct_size(space, supercls): - key = space, supercls - try: - return _subclass_cache[key] - except KeyError: - assert not hasattr(supercls, "__del__") - result = [] - for i in range(SUBCLASSES_MIN_FIELDS, SUBCLASSES_MAX_FIELDS+1): - result.append(_make_subclass_size_n(supercls, i)) - for i in range(SUBCLASSES_MIN_FIELDS): - result.insert(0, result[0]) - if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS: - assert len(set(result)) == 1 - _subclass_cache[key] = result - return result -memo_get_subclass_of_correct_size._annspecialcase_ = "specialize:memo" 
-_subclass_cache = {} +class Object(W_Root): + # mainly for tests + objectmodel.import_from_mixin(MapdictStorageMixin) -def _make_subclass_size_n(supercls, n): + objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictWeakrefSupport) + objectmodel.import_from_mixin(MapdictDictSupport) + + +SUBCLASSES_NUM_FIELDS = 5 + +def _make_storage_mixin_size_n(n=SUBCLASSES_NUM_FIELDS): from rpython.rlib import unroll rangen = unroll.unrolling_iterable(range(n)) nmin1 = n - 1 rangenmin1 = unroll.unrolling_iterable(range(nmin1)) valnmin1 = "_value%s" % nmin1 - class subcls(BaseMapdictObject, supercls): + class subcls(object): def _init_empty(self, map): for i in rangenmin1: setattr(self, "_value%s" % i, None) @@ -695,7 +708,7 @@ erased = erase_list(storage_list) setattr(self, "_value%s" % nmin1, erased) - subcls.__name__ = supercls.__name__ + "Size%s" % n + subcls.__name__ = "Size%s" % n return subcls # ____________________________________________________________ @@ -962,7 +975,7 @@ name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is # a MutableCell, which may change without changing the version_tag - _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( + _, w_descr = w_type._pure_lookup_where_with_method_cache( name, version_tag) # attrname, index = ("", INVALID) @@ -1009,22 +1022,15 @@ return False def LOOKUP_METHOD_mapdict_fill_cache_method(space, pycode, name, nameindex, - w_obj, w_type): + w_obj, w_type, w_method): + if w_method is None or isinstance(w_method, MutableCell): + # don't cache the MutableCell XXX could be fixed + return version_tag = w_type.version_tag() - if version_tag is None: - return + assert version_tag is not None map = w_obj._get_mapdict_map() if map is None or isinstance(map.terminator, DevolvedDictTerminator): return - # We know here that w_obj.getdictvalue(space, name) just returned None, - # so the 'name' is not in the instance. 
We repeat the lookup to find it - # in the class, this time taking care of the result: it can be either a - # quasi-constant class attribute, or actually a MutableCell --- which we - # must not cache. (It should not be None here, but you never know...) - _, w_method = w_type._pure_lookup_where_possibly_with_method_cache( - name, version_tag) - if w_method is None or isinstance(w_method, MutableCell): - return _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) # XXX fix me: if a function contains a loop with both LOAD_ATTR and diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -372,15 +372,8 @@ if cls.typedef.applevel_subclasses_base is not None: cls = cls.typedef.applevel_subclasses_base # - if (self.config.objspace.std.withmapdict and cls is W_ObjectObject - and not w_subtype.needsdel): - from pypy.objspace.std.mapdict import get_subclass_of_correct_size - subcls = get_subclass_of_correct_size(self, cls, w_subtype) - else: - subcls = get_unique_interplevel_subclass( - self.config, cls, w_subtype.hasdict, - w_subtype.layout.nslots != 0, - w_subtype.needsdel, w_subtype.weakrefable) + subcls = get_unique_interplevel_subclass( + self.config, cls, w_subtype.needsdel) instance = instantiate(subcls) assert isinstance(instance, cls) instance.user_setup(self, w_subtype) @@ -543,7 +536,6 @@ return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2]) _DescrOperation_is_true = is_true - _DescrOperation_getattr = getattr def is_true(self, w_obj): # a shortcut for performance @@ -552,8 +544,6 @@ return self._DescrOperation_is_true(w_obj) def getattr(self, w_obj, w_name): - if not self.config.objspace.std.getattributeshortcut: - return self._DescrOperation_getattr(w_obj, w_name) # an optional shortcut for performance w_type = self.type(w_obj) diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- 
a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -186,10 +186,9 @@ def specialized_zip_2_lists(space, w_list1, w_list2): from pypy.objspace.std.listobject import W_ListObject - if (not isinstance(w_list1, W_ListObject) or - not isinstance(w_list2, W_ListObject)): + if type(w_list1) is not W_ListObject or type(w_list2) is not W_ListObject: raise OperationError(space.w_TypeError, - space.wrap("expected two lists")) + space.wrap("expected two exact lists")) if space.config.objspace.std.withspecialisedtuple: intlist1 = w_list1.getitems_int() diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -864,14 +864,3 @@ def __int__(self): return 42 raises(TypeError, bytes, A()) - - -class AppTestPrebuilt(AppTestBytesObject): - spaceconfig = {"objspace.std.withprebuiltchar": True} - -class AppTestShare(AppTestBytesObject): - spaceconfig = {"objspace.std.sharesmallstr": True} - -class AppTestPrebuiltShare(AppTestBytesObject): - spaceconfig = {"objspace.std.withprebuiltchar": True, - "objspace.std.sharesmallstr": True} diff --git a/pypy/objspace/std/test/test_callmethod.py b/pypy/objspace/std/test/test_callmethod.py --- a/pypy/objspace/std/test/test_callmethod.py +++ b/pypy/objspace/std/test/test_callmethod.py @@ -108,10 +108,6 @@ """) -class AppTestCallMethodWithGetattributeShortcut(AppTestCallMethod): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - class TestCallMethod: def test_space_call_method(self): space = self.space diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1183,11 +1183,9 @@ class Config: class objspace: class std: - withsmalldicts = False withcelldict = False - withmethodcache = False - 
withidentitydict = False - withmapdict = False + methodcachesizeexp = 11 + withmethodcachecounter = False FakeSpace.config = Config() From pypy.commits at gmail.com Thu Apr 28 00:21:56 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 27 Apr 2016 21:21:56 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: GC headers don't need to be in the database (grafted from b05e778c895c) Message-ID: <57218fe4.08851c0a.60f40.1630@mx.google.com> Author: Ronan Lamy Branch: release-5.x Changeset: r83998:b0a649e90b66 Date: 2016-04-24 05:08 +0100 http://bitbucket.org/pypy/pypy/changeset/b0a649e90b66/ Log: GC headers don't need to be in the database (grafted from b05e778c895c) diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -547,7 +547,6 @@ gct = self.db.gctransformer if gct is not None: self.gc_init = gct.gcheader_initdata(self.obj) - db.getcontainernode(self.gc_init) else: self.gc_init = None @@ -678,7 +677,6 @@ gct = self.db.gctransformer if gct is not None: self.gc_init = gct.gcheader_initdata(self.obj) - db.getcontainernode(self.gc_init) else: self.gc_init = None From pypy.commits at gmail.com Thu Apr 28 01:16:59 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 27 Apr 2016 22:16:59 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: hg merge default (broken) Message-ID: <57219ccb.08a81c0a.d57f7.23c3@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r83999:f76e880e906f Date: 2016-04-28 04:52 +0100 http://bitbucket.org/pypy/pypy/changeset/f76e880e906f/ Log: hg merge default (broken) diff too long, truncating to 2000 out of 10591 lines diff --git a/TODO b/TODO new file mode 100644 --- /dev/null +++ b/TODO @@ -0,0 +1,3 @@ +* python setup.py install in numpy does not somehow tell setuptools + it's installed (I bet it's about the py27 tag) +* reduce size of generated c code from slot definitions in slotdefs. 
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -12,9 +12,9 @@ The work on the cling backend has so far been done only for CPython, but bringing it to PyPy is a lot less work than developing it in the first place. -.. _Reflex: http://root.cern.ch/drupal/content/reflex -.. _CINT: http://root.cern.ch/drupal/content/cint -.. _cling: http://root.cern.ch/drupal/content/cling +.. _Reflex: https://root.cern.ch/how/how-use-reflex +.. _CINT: https://root.cern.ch/introduction-cint +.. _cling: https://root.cern.ch/cling .. _llvm: http://llvm.org/ .. _clang: http://clang.llvm.org/ @@ -283,7 +283,8 @@ core reflection set, but for the moment assume we want to have it in the reflection library that we are building for this example. -The ``genreflex`` script can be steered using a so-called `selection file`_, +The ``genreflex`` script can be steered using a so-called `selection file`_ +(see "Generating Reflex Dictionaries") which is a simple XML file specifying, either explicitly or by using a pattern, which classes, variables, namespaces, etc. to select from the given header file. @@ -305,7 +306,7 @@ -.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries +.. _selection file: https://root.cern.ch/how/how-use-reflex Now the reflection info can be generated and compiled:: @@ -811,7 +812,7 @@ immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment variable. -.. _PyROOT: http://root.cern.ch/drupal/content/pyroot +.. _PyROOT: https://root.cern.ch/pyroot There are a couple of minor differences between PyCintex and cppyy, most to do with naming. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -79,7 +79,7 @@ :doc:`Full details ` are `available here `. .. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 -.. _Reflex: http://root.cern.ch/drupal/content/reflex +.. 
_Reflex: https://root.cern.ch/how/how-use-reflex RPython Mixed Modules diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -22,3 +22,24 @@ JIT: use bitstrings to compress the lists of read or written descrs that we attach to EffectInfo. Fixes a problem we had in remove-objspace-options. + +.. branch: cpyext-for-merge +Update cpyext C-API support: + - allow c-snippet tests to be run with -A so we can verify we are compatible + - fix many edge cases exposed by fixing tests to run with -A + - issequence() logic matches cpython + - make PyStringObject and PyUnicodeObject field names compatible with cpython + - add prelminary support for PyDateTime_* + - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy, + PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile, + - PyAnySet_CheckExact, PyUnicode_Concat + - improve support for PyGILState_Ensure, PyGILState_Release, and thread + primitives, also find a case where CPython will allow thread creation + before PyEval_InitThreads is run, dissallow on PyPy + - create a PyObject-specific list strategy + - rewrite slot assignment for typeobjects + - improve tracking of PyObject to rpython object mapping + - support tp_as_{number, sequence, mapping, buffer} slots +After this branch, we are almost able to support upstream numpy via cpyext, so +we created (yet another) fork of numpy at github.com/pypy/numpy with the needed +changes diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1171,7 +1171,27 @@ return self.wrap(self.lookup(w_obj, "__call__") is not None) def issequence_w(self, w_obj): - return (self.findattr(w_obj, self.wrap("__getitem__")) is not None) + if self.is_oldstyle_instance(w_obj): + return (self.findattr(w_obj, self.wrap('__getitem__')) is not None) + flag = self.type(w_obj).flag_map_or_seq + if 
flag == 'M': + return False + elif flag == 'S': + return True + else: + return (self.lookup(w_obj, '__getitem__') is not None) + + def ismapping_w(self, w_obj): + if self.is_oldstyle_instance(w_obj): + return (self.findattr(w_obj, self.wrap('__getitem__')) is not None) + flag = self.type(w_obj).flag_map_or_seq + if flag == 'M': + return True + elif flag == 'S': + return False + else: + return (self.lookup(w_obj, '__getitem__') is not None and + self.lookup(w_obj, '__getslice__') is None) # The code below only works # for the simple case (new-style instance). diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -36,6 +36,8 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib import rawrefcount +from rpython.rlib import rthread +from rpython.rlib.debug import fatalerror_notb DEBUG_WRAPPER = True @@ -84,11 +86,13 @@ FILEP = rffi.COpaquePtr('FILE') if sys.platform == 'win32': - fileno = rffi.llexternal('_fileno', [FILEP], rffi.INT) + dash = '_' else: - fileno = rffi.llexternal('fileno', [FILEP], rffi.INT) - + dash = '' +fileno = rffi.llexternal(dash + 'fileno', [FILEP], rffi.INT) fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP) +fdopen = rffi.llexternal(dash + 'fdopen', [rffi.INT, CONST_STRING], + FILEP, save_err=rffi.RFFI_SAVE_ERRNO) _fclose = rffi.llexternal('fclose', [FILEP], rffi.INT) def fclose(fp): @@ -118,9 +122,11 @@ def is_valid_fp(fp): return is_valid_fd(fileno(fp)) +pypy_decl = 'pypy_decl.h' + constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER -METH_COEXIST METH_STATIC METH_CLASS +METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES @@ -128,7 +134,7 @@ """.split() for name in constant_names: 
setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) -udir.join('pypy_decl.h').write("/* Will be filled later */\n") +udir.join(pypy_decl).write("/* Will be filled later */\n") udir.join('pypy_structmember_decl.h').write("/* Will be filled later */\n") udir.join('pypy_macros.h').write("/* Will be filled later */\n") globals().update(rffi_platform.configure(CConfig_constants)) @@ -144,7 +150,7 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir): +def copy_header_files(dstdir, copy_numpy_headers): # XXX: 20 lines of code to recursively copy a directory, really?? assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') @@ -152,6 +158,18 @@ headers.append(udir.join(name)) _copy_header_files(headers, dstdir) + if copy_numpy_headers: + try: + dstdir.mkdir('numpy') + except py.error.EEXIST: + pass + numpy_dstdir = dstdir / 'numpy' + + numpy_include_dir = include_dir / 'numpy' + numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') + _copy_header_files(numpy_headers, numpy_dstdir) + + class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -177,6 +195,61 @@ # exceptions generate a OperationError(w_SystemError); and the funtion returns # the error value specifed in the API. # +# Handling of the GIL +# ------------------- +# +# We add a global variable 'cpyext_glob_tid' that contains a thread +# id. Invariant: this variable always contain 0 when the PyPy GIL is +# released. It should also contain 0 when regular RPython code +# executes. In non-cpyext-related code, it will thus always be 0. +# +# **make_generic_cpy_call():** RPython to C, with the GIL held. Before +# the call, must assert that the global variable is 0 and set the +# current thread identifier into the global variable. After the call, +# assert that the global variable still contains the current thread id, +# and reset it to 0. 
+# +# **make_wrapper():** C to RPython; by default assume that the GIL is +# held, but accepts gil="acquire", "release", "around", +# "pygilstate_ensure", "pygilstate_release". +# +# When a wrapper() is called: +# +# * "acquire": assert that the GIL is not currently held, i.e. the +# global variable does not contain the current thread id (otherwise, +# deadlock!). Acquire the PyPy GIL. After we acquired it, assert +# that the global variable is 0 (it must be 0 according to the +# invariant that it was 0 immediately before we acquired the GIL, +# because the GIL was released at that point). +# +# * gil=None: we hold the GIL already. Assert that the current thread +# identifier is in the global variable, and replace it with 0. +# +# * "pygilstate_ensure": if the global variable contains the current +# thread id, replace it with 0 and set the extra arg to 0. Otherwise, +# do the "acquire" and set the extra arg to 1. Then we'll call +# pystate.py:PyGILState_Ensure() with this extra arg, which will do +# the rest of the logic. +# +# When a wrapper() returns, first assert that the global variable is +# still 0, and then: +# +# * "release": release the PyPy GIL. The global variable was 0 up to +# and including at the point where we released the GIL, but afterwards +# it is possible that the GIL is acquired by a different thread very +# quickly. +# +# * gil=None: we keep holding the GIL. Set the current thread +# identifier into the global variable. +# +# * "pygilstate_release": if the argument is PyGILState_UNLOCKED, +# release the PyPy GIL; otherwise, set the current thread identifier +# into the global variable. The rest of the logic of +# PyGILState_Release() should be done before, in pystate.py. 
+ +cpyext_glob_tid_ptr = lltype.malloc(rffi.CArray(lltype.Signed), 1, + flavor='raw', immortal=True, zero=True) + cpyext_namespace = NameManager('cpyext_') @@ -196,6 +269,9 @@ argnames, varargname, kwargname = pycode.cpython_code_signature(callable.func_code) assert argnames[0] == 'space' + if gil == 'pygilstate_ensure': + assert argnames[-1] == 'previous_state' + del argnames[-1] self.argnames = argnames[1:] assert len(self.argnames) == len(self.argtypes) self.gil = gil @@ -416,15 +492,14 @@ 'PyThread_acquire_lock', 'PyThread_release_lock', 'PyThread_create_key', 'PyThread_delete_key', 'PyThread_set_key_value', 'PyThread_get_key_value', 'PyThread_delete_key_value', - 'PyThread_ReInitTLS', + 'PyThread_ReInitTLS', 'PyThread_init_thread', + 'PyThread_start_new_thread', 'PyStructSequence_InitType', 'PyStructSequence_New', 'PyStructSequence_UnnamedField', 'PyFunction_Type', 'PyMethod_Type', 'PyRange_Type', 'PyTraceBack_Type', - 'PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS', '_PyArray_CopyInto', - 'Py_DebugFlag', 'Py_VerboseFlag', 'Py_InteractiveFlag', 'Py_InspectFlag', 'Py_OptimizeFlag', 'Py_NoSiteFlag', 'Py_BytesWarningFlag', 'Py_UseClassExceptionsFlag', 'Py_FrozenFlag', 'Py_TabcheckFlag', 'Py_UnicodeFlag', 'Py_IgnoreEnvironmentFlag', @@ -433,11 +508,11 @@ ] TYPES = {} GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur - '_Py_NoneStruct#': ('PyObject*', 'space.w_None'), - '_Py_TrueStruct#': ('PyObject*', 'space.w_True'), - '_Py_ZeroStruct#': ('PyObject*', 'space.w_False'), - '_Py_NotImplementedStruct#': ('PyObject*', 'space.w_NotImplemented'), - '_Py_EllipsisObject#': ('PyObject*', 'space.w_Ellipsis'), + '_Py_NoneStruct#%s' % pypy_decl: ('PyObject*', 'space.w_None'), + '_Py_TrueStruct#%s' % pypy_decl: ('PyObject*', 'space.w_True'), + '_Py_ZeroStruct#%s' % pypy_decl: ('PyObject*', 'space.w_False'), + '_Py_NotImplementedStruct#%s' % pypy_decl: ('PyObject*', 'space.w_NotImplemented'), + '_Py_EllipsisObject#%s' % pypy_decl: 
('PyObject*', 'space.w_Ellipsis'), 'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'), } FORWARD_DECLS = [] @@ -466,6 +541,7 @@ "PyBytes_Type": "space.w_bytes", "PyUnicode_Type": "space.w_unicode", "PyDict_Type": "space.w_dict", + "PyDictProxy_Type": 'space.gettypeobject(cpyext.dictproxyobject.W_DictProxyObject.typedef)', "PyTuple_Type": "space.w_tuple", "PyList_Type": "space.w_list", "PySet_Type": "space.w_set", @@ -488,7 +564,7 @@ 'PyWrapperDescr_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)', 'PyInstanceMethod_Type': 'space.gettypeobject(cpyext.classobject.InstanceMethod.typedef)', }.items(): - GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) + GLOBALS['%s#%s' % (cpyname, pypy_decl)] = ('PyTypeObject*', pypyexpr) for cpyname in '''PyMethodObject PyListObject PyLongObject PyDictObject'''.split(): @@ -604,7 +680,14 @@ fatal_value = callable.api_func.restype._defl() gil_acquire = (gil == "acquire" or gil == "around") gil_release = (gil == "release" or gil == "around") - assert gil is None or gil_acquire or gil_release + pygilstate_ensure = (gil == "pygilstate_ensure") + pygilstate_release = (gil == "pygilstate_release") + assert (gil is None or gil_acquire or gil_release + or pygilstate_ensure or pygilstate_release) + deadlock_error = ("GIL deadlock detected when a CPython C extension " + "module calls %r" % (callable.__name__,)) + no_gil_error = ("GIL not held when a CPython C extension " + "module calls %r" % (callable.__name__,)) @specialize.ll() def wrapper(*args): @@ -612,8 +695,27 @@ from pypy.module.cpyext.pyobject import as_pyobj # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer + + # see "Handling of the GIL" above (careful, we don't have the GIL here) + tid = rthread.get_or_make_ident() if gil_acquire: + if cpyext_glob_tid_ptr[0] == tid: + fatalerror_notb(deadlock_error) rgil.acquire() + assert cpyext_glob_tid_ptr[0] == 0 + elif pygilstate_ensure: + from 
pypy.module.cpyext import pystate + if cpyext_glob_tid_ptr[0] == tid: + cpyext_glob_tid_ptr[0] = 0 + args += (pystate.PyGILState_LOCKED,) + else: + rgil.acquire() + args += (pystate.PyGILState_UNLOCKED,) + else: + if cpyext_glob_tid_ptr[0] != tid: + fatalerror_notb(no_gil_error) + cpyext_glob_tid_ptr[0] = 0 + rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value @@ -622,7 +724,8 @@ try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, - assert len(args) == len(callable.api_func.argtypes) + assert len(args) == (len(callable.api_func.argtypes) + + pygilstate_ensure) for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if is_PyObject(typ) and is_wrapped: @@ -631,6 +734,8 @@ else: arg_conv = arg boxed_args += (arg_conv, ) + if pygilstate_ensure: + boxed_args += (args[-1], ) state = space.fromcache(State) try: result = callable(space, *boxed_args) @@ -690,8 +795,20 @@ pypy_debug_catch_fatal_exception() assert False rffi.stackcounter.stacks_counter -= 1 - if gil_release: + + # see "Handling of the GIL" above + assert cpyext_glob_tid_ptr[0] == 0 + if pygilstate_release: + from pypy.module.cpyext import pystate + arg = rffi.cast(lltype.Signed, args[-1]) + unlock = (arg == pystate.PyGILState_UNLOCKED) + else: + unlock = gil_release + if unlock: rgil.release() + else: + cpyext_glob_tid_ptr[0] = tid + return retval callable._always_inline_ = 'try' wrapper.__name__ = "wrapper for %r" % (callable, ) @@ -784,6 +901,9 @@ structindex = {} for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): + if not func: + # added only for the macro, not the decl + continue restype, args = c_function_signature(db, func) members.append('%s (*%s)(%s);' % (restype, name, args)) structindex[name] = len(structindex) @@ -795,12 +915,12 @@ RPY_EXTERN struct PyPyAPI* pypyAPI = &_pypyAPI; """ % dict(members=structmembers) - functions = 
generate_decls_and_callbacks(db, export_symbols, + functions = generate_decls_and_callbacks(db, export_symbols, prefix='cpyexttest') global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): - if "#" in name: + if '#' in name: continue if typ == 'PyDateTime_CAPI*': continue @@ -824,7 +944,7 @@ '\n' + '\n'.join(functions)) - eci = build_eci(True, export_symbols, code) + eci = build_eci(True, export_symbols, code, use_micronumpy) eci = eci.compile_shared_lib( outputfilename=str(udir / "module_cache" / "pypyapi")) modulename = py.path.local(eci.libraries[-1]) @@ -836,7 +956,7 @@ ob = rawrefcount.next_dead(PyObject) if not ob: break - print ob + print 'deallocating PyObject', ob decref(space, ob) print 'dealloc_trigger DONE' return "RETRY" @@ -855,8 +975,8 @@ for name, (typ, expr) in GLOBALS.iteritems(): from pypy.module import cpyext # for the eval() below w_obj = eval(expr) - if name.endswith('#'): - name = name[:-1] + if '#' in name: + name = name.split('#')[0] isptr = False else: isptr = True @@ -901,7 +1021,7 @@ # ctypes.c_void_p) for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): - if name.startswith('cpyext_'): # XXX hack + if name.startswith('cpyext_') or func is None: # XXX hack continue pypyAPI[structindex[name]] = ctypes.cast( ll2ctypes.lltype2ctypes(func.get_llhelper(space)), @@ -954,6 +1074,8 @@ cpyext_type_init = self.cpyext_type_init self.cpyext_type_init = None for pto, w_type in cpyext_type_init: + if space.is_w(w_type, space.w_str): + pto.c_tp_itemsize = 1 finish_type_1(space, pto) finish_type_2(space, pto, w_type) @@ -971,10 +1093,14 @@ pypy_macros = [] renamed_symbols = [] for name in export_symbols: - name = name.replace("#", "") + if '#' in name: + name,header = name.split('#') + else: + header = pypy_decl newname = mangle_name(prefix, name) assert newname, name - pypy_macros.append('#define %s %s' % (name, newname)) + if header == pypy_decl: + pypy_macros.append('#define %s %s' 
% (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) renamed_symbols.append(newname) @@ -1003,7 +1129,7 @@ # implement function callbacks and generate function decls functions = [] decls = {} - pypy_decls = decls['pypy_decl.h'] = [] + pypy_decls = decls[pypy_decl] = [] pypy_decls.append('#define Signed long /* xxx temporary fix */\n') pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') @@ -1019,6 +1145,8 @@ header = decls[header_name] for name, func in sorted(header_functions.iteritems()): + if not func: + continue if header == DEFAULT_HEADER: _name = name else: @@ -1044,12 +1172,15 @@ functions.append(header + '\n{return va_arg(*vp, %s);}\n' % name) for name, (typ, expr) in GLOBALS.iteritems(): - if name.endswith('#'): - name = name.replace("#", "") + if '#' in name: + name, header = name.split("#") typ = typ.replace("*", "") elif name.startswith('PyExc_'): typ = 'PyObject*' - pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) + header = pypy_decl + if header != pypy_decl: + decls[header].append('#define %s %s' % (name, mangle_name(prefix, name))) + decls[header].append('PyAPI_DATA(%s) %s;' % (typ, name)) for header_name in FUNCTIONS_BY_HEADER.keys(): header = decls[header_name] @@ -1076,9 +1207,10 @@ source_dir / "pysignals.c", source_dir / "pythread.c", source_dir / "missing.c", + source_dir / "pymem.c", ] -def build_eci(building_bridge, export_symbols, code): +def build_eci(building_bridge, export_symbols, code, use_micronumpy=False): "NOT_RPYTHON" # Build code and get pointer to the structure kwds = {} @@ -1100,9 +1232,11 @@ # Generate definitions for global structures structs = ["#include "] + if use_micronumpy: + structs.append('#include /* api.py line 1223 */') for name, (typ, expr) in GLOBALS.iteritems(): - if name.endswith('#'): - structs.append('%s %s;' % (typ[:-1], name[:-1])) + if '#' in name: + structs.append('%s %s;' % (typ[:-1], name.split('#')[0])) elif 
name.startswith('PyExc_'): structs.append('PyTypeObject _%s;' % (name,)) structs.append('PyObject* %s = (PyObject*)&_%s;' % (name, name)) @@ -1146,11 +1280,12 @@ use_micronumpy = space.config.objspace.usemodules.micronumpy if not use_micronumpy: return use_micronumpy - # import to register api functions by side-effect - import pypy.module.cpyext.ndarrayobject - global GLOBALS, SYMBOLS_C, separate_module_files - GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") - SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS'] + # import registers api functions by side-effect, we also need HEADER + from pypy.module.cpyext.ndarrayobject import HEADER + global GLOBALS, FUNCTIONS_BY_HEADER, separate_module_files + for func_name in ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS']: + FUNCTIONS_BY_HEADER.setdefault(HEADER, {})[func_name] = None + GLOBALS["PyArray_Type#%s" % HEADER] = ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") separate_module_files.append(source_dir / "ndarrayobject.c") return use_micronumpy @@ -1160,14 +1295,18 @@ export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() + prefix = 'PyPy' - generate_macros(export_symbols, prefix='PyPy') + generate_macros(export_symbols, prefix=prefix) - functions = generate_decls_and_callbacks(db, [], api_struct=False, - prefix='PyPy') - code = "#include \n" + "\n".join(functions) + functions = generate_decls_and_callbacks(db, [], api_struct=False, + prefix=prefix) + code = "#include \n" + if use_micronumpy: + code += "#include /* api.py line 1290 */" + code += "\n".join(functions) - eci = build_eci(False, export_symbols, code) + eci = build_eci(False, export_symbols, code, use_micronumpy) space.fromcache(State).install_dll(eci) @@ -1179,9 +1318,14 @@ lines = ['PyObject *pypy_static_pyobjs[] = {\n'] include_lines = ['RPY_EXTERN PyObject 
*pypy_static_pyobjs[];\n'] for name, (typ, expr) in sorted(GLOBALS.items()): - if name.endswith('#'): + if '#' in name: + name, header = name.split('#') assert typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*') - typ, name = typ[:-1], name[:-1] + typ = typ[:-1] + if header != pypy_decl: + # since the #define is not in pypy_macros, do it here + mname = mangle_name(prefix, name) + include_lines.append('#define %s %s\n' % (name, mname)) elif name.startswith('PyExc_'): typ = 'PyTypeObject' name = '_' + name @@ -1208,14 +1352,16 @@ for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): + if not func: + continue newname = mangle_name('PyPy', name) or name - deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include) + copy_header_files(trunk_include, use_micronumpy) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) @@ -1352,10 +1498,17 @@ arg = as_pyobj(space, arg) boxed_args += (arg,) + # see "Handling of the GIL" above + tid = rthread.get_ident() + assert cpyext_glob_tid_ptr[0] == 0 + cpyext_glob_tid_ptr[0] = tid + try: # Call the function result = call_external_function(func, *boxed_args) finally: + assert cpyext_glob_tid_ptr[0] == tid + cpyext_glob_tid_ptr[0] = 0 keepalive_until_here(*keepalives) if is_PyObject(RESULT_TYPE): diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -2,11 +2,11 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, - PyObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) + PyObjectFields, 
PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) from pypy.module.cpyext.pyerrors import PyErr_BadArgument from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, - make_typedescr, get_typedescr) + make_typedescr, get_typedescr, as_pyobj, Py_IncRef) ## ## Implementation of PyBytesObject @@ -27,7 +27,7 @@ ## Solution ## -------- ## -## PyBytesObject contains two additional members: the size and a pointer to a +## PyBytesObject contains two additional members: the ob_size and a pointer to a ## char buffer; it may be NULL. ## ## - A string allocated by pypy will be converted into a PyBytesObject with a @@ -36,7 +36,7 @@ ## ## - A string allocated with PyBytes_FromStringAndSize(NULL, size) will ## allocate a PyBytesObject structure, and a buffer with the specified -## size, but the reference won't be stored in the global map; there is no +## size+1, but the reference won't be stored in the global map; there is no ## corresponding object in pypy. When from_ref() or Py_INCREF() is called, ## the pypy string is created, and added to the global map of tracked ## objects. The buffer is then supposed to be immutable. 
@@ -52,8 +52,8 @@ PyBytesObjectStruct = lltype.ForwardReference() PyBytesObject = lltype.Ptr(PyBytesObjectStruct) -PyBytesObjectFields = PyObjectFields + \ - (("buffer", rffi.CCHARP), ("size", Py_ssize_t)) +PyBytesObjectFields = PyVarObjectFields + \ + (("ob_shash", rffi.LONG), ("ob_sstate", rffi.INT), ("buffer", rffi.CCHARP)) cpython_struct("PyBytesObject", PyBytesObjectFields, PyBytesObjectStruct) @bootstrap_function @@ -78,10 +78,11 @@ py_str = rffi.cast(PyBytesObject, py_obj) buflen = length + 1 - py_str.c_size = length + py_str.c_ob_size = length py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw', zero=True, add_memory_pressure=True) + py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED return py_str def bytes_attach(space, py_obj, w_obj): @@ -90,8 +91,10 @@ buffer must not be modified. """ py_str = rffi.cast(PyBytesObject, py_obj) - py_str.c_size = len(space.bytes_w(w_obj)) + py_str.c_ob_size = len(space.str_w(w_obj)) py_str.c_buffer = lltype.nullptr(rffi.CCHARP.TO) + py_str.c_ob_shash = space.hash_w(w_obj) + py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL def bytes_realize(space, py_obj): """ @@ -99,8 +102,13 @@ be modified after this call. 
""" py_str = rffi.cast(PyBytesObject, py_obj) - s = rffi.charpsize2str(py_str.c_buffer, py_str.c_size) + if not py_str.c_buffer: + py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, py_str.c_ob_size + 1, + flavor='raw', zero=True) + s = rffi.charpsize2str(py_str.c_buffer, py_str.c_ob_size) w_obj = space.wrapbytes(s) + py_str.c_ob_shash = space.hash_w(w_obj) + py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL track_reference(space, py_obj, w_obj) return w_obj @@ -157,12 +165,12 @@ ref_str.c_buffer = rffi.str2charp(s) buffer[0] = ref_str.c_buffer if length: - length[0] = ref_str.c_size + length[0] = ref_str.c_ob_size else: i = 0 while ref_str.c_buffer[i] != '\0': i += 1 - if i != ref_str.c_size: + if i != ref_str.c_ob_size: raise OperationError(space.w_TypeError, space.wrap( "expected string without null bytes")) return 0 @@ -171,7 +179,7 @@ def PyBytes_Size(space, ref): if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: ref = rffi.cast(PyBytesObject, ref) - return ref.c_size + return ref.c_ob_size else: w_obj = from_ref(space, ref) return space.len_w(w_obj) @@ -200,7 +208,7 @@ ref[0] = lltype.nullptr(PyObject.TO) raise to_cp = newsize - oldsize = py_str.c_size + oldsize = py_str.c_ob_size if oldsize < newsize: to_cp = oldsize for i in range(to_cp): @@ -231,8 +239,8 @@ return w_str = from_ref(space, ref[0]) w_newstr = space.add(w_str, w_newpart) - Py_DecRef(space, ref[0]) ref[0] = make_ref(space, w_newstr) + Py_IncRef(space, ref[0]) @cpython_api([PyObjectP, PyObject], lltype.Void) def PyBytes_ConcatAndDel(space, ref, newpart): diff --git a/pypy/module/cpyext/cdatetime.py b/pypy/module/cpyext/cdatetime.py --- a/pypy/module/cpyext/cdatetime.py +++ b/pypy/module/cpyext/cdatetime.py @@ -42,9 +42,15 @@ return datetimeAPI -PyDateTime_Date = PyObject -PyDateTime_Time = PyObject -PyDateTime_DateTime = PyObject +PyDateTime_DateStruct = lltype.ForwardReference() +PyDateTime_TimeStruct = lltype.ForwardReference() +PyDateTime_DateTimeStruct = 
lltype.ForwardReference() +cpython_struct("PyDateTime_Date", PyObjectFields, PyDateTime_DateStruct) +PyDateTime_Date = lltype.Ptr(PyDateTime_DateStruct) +cpython_struct("PyDateTime_Time", PyObjectFields, PyDateTime_TimeStruct) +PyDateTime_Time = lltype.Ptr(PyDateTime_TimeStruct) +cpython_struct("PyDateTime_DateTime", PyObjectFields, PyDateTime_DateTimeStruct) +PyDateTime_DateTime = lltype.Ptr(PyDateTime_DateTimeStruct) PyDeltaObjectStruct = lltype.ForwardReference() cpython_struct("PyDateTime_Delta", PyObjectFields, PyDeltaObjectStruct) diff --git a/pypy/module/cpyext/complexobject.py b/pypy/module/cpyext/complexobject.py --- a/pypy/module/cpyext/complexobject.py +++ b/pypy/module/cpyext/complexobject.py @@ -1,16 +1,51 @@ from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.cpyext.api import ( +from pypy.module.cpyext.api import (PyObjectFields, bootstrap_function, cpython_api, cpython_struct, PyObject, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + make_typedescr, track_reference, from_ref) from pypy.module.cpyext.floatobject import PyFloat_AsDouble from pypy.objspace.std.complexobject import W_ComplexObject from pypy.interpreter.error import OperationError PyComplex_Check, PyComplex_CheckExact = build_type_checkers("Complex") -Py_complex_t = lltype.ForwardReference() +Py_complex_t = rffi.CStruct('Py_complex_t', + ('real', rffi.DOUBLE), + ('imag', rffi.DOUBLE), + hints={'size': 2 * rffi.sizeof(rffi.DOUBLE)}) Py_complex_ptr = lltype.Ptr(Py_complex_t) -Py_complex_fields = (("real", rffi.DOUBLE), ("imag", rffi.DOUBLE)) -cpython_struct("Py_complex", Py_complex_fields, Py_complex_t) + +PyComplexObjectStruct = lltype.ForwardReference() +PyComplexObject = lltype.Ptr(PyComplexObjectStruct) +PyComplexObjectFields = PyObjectFields + \ + (("cval", Py_complex_t),) +cpython_struct("PyComplexObject", PyComplexObjectFields, PyComplexObjectStruct) + + at bootstrap_function +def init_complexobject(space): + "Type description of PyComplexObject" 
+ make_typedescr(space.w_complex.layout.typedef, + basestruct=PyComplexObject.TO, + attach=complex_attach, + realize=complex_realize) + +def complex_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyComplexObject with the given complex object. The + value must not be modified. + """ + assert isinstance(w_obj, W_ComplexObject) + py_obj = rffi.cast(PyComplexObject, py_obj) + py_obj.c_cval.c_real = w_obj.realval + py_obj.c_cval.c_imag = w_obj.imagval + +def complex_realize(space, obj): + py_obj = rffi.cast(PyComplexObject, obj) + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(W_ComplexObject, w_type) + w_obj.__init__(py_obj.c_cval.c_real, py_obj.c_cval.c_imag) + track_reference(space, obj, w_obj) + return w_obj @cpython_api([lltype.Float, lltype.Float], PyObject) diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -23,6 +23,7 @@ # NOTE: this works so far because all our dict strategies store # *values* as full objects, which stay alive as long as the dict is # alive and not modified. So we can return a borrowed ref. + # XXX this is wrong with IntMutableCell. Hope it works... return w_res @cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1) @@ -62,6 +63,7 @@ # NOTE: this works so far because all our dict strategies store # *values* as full objects, which stay alive as long as the dict is # alive and not modified. So we can return a borrowed ref. + # XXX this is wrong with IntMutableCell. Hope it works... 
return w_res @cpython_api([PyObject, CONST_STRING], rffi.INT_real, error=-1) @@ -104,6 +106,32 @@ """ return space.call_method(space.w_dict, "copy", w_obj) +def _has_val(space, w_dict, w_key): + try: + w_val = space.getitem(w_dict, w_key) + except OperationError as e: + if e.match(space, space.w_KeyError): + return False + else: + raise + return True + + at cpython_api([PyObject, PyObject, rffi.INT_real], rffi.INT_real, error=-1) +def PyDict_Merge(space, w_a, w_b, override): + """Iterate over mapping object b adding key-value pairs to dictionary a. + b may be a dictionary, or any object supporting PyMapping_Keys() + and PyObject_GetItem(). If override is true, existing pairs in a + will be replaced if a matching key is found in b, otherwise pairs will + only be added if there is not a matching key in a. Return 0 on + success or -1 if an exception was raised. + """ + override = rffi.cast(lltype.Signed, override) + w_keys = space.call_method(w_b, "keys") + for w_key in space.iteriterable(w_keys): + if not _has_val(space, w_a, w_key) or override != 0: + space.setitem(w_a, w_key, space.getitem(w_b, w_key)) + return 0 + @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) def PyDict_Update(space, w_obj, w_other): """This is the same as PyDict_Merge(a, b, 1) in C, or a.update(b) in diff --git a/pypy/module/cpyext/floatobject.py b/pypy/module/cpyext/floatobject.py --- a/pypy/module/cpyext/floatobject.py +++ b/pypy/module/cpyext/floatobject.py @@ -1,8 +1,42 @@ from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import ( +from pypy.module.cpyext.api import (PyObjectFields, bootstrap_function, + cpython_struct, CANNOT_FAIL, cpython_api, PyObject, build_type_checkers, CONST_STRING) +from pypy.module.cpyext.pyobject import ( + make_typedescr, track_reference, from_ref) from pypy.interpreter.error import OperationError from rpython.rlib.rstruct import runpack +from pypy.objspace.std.floatobject import W_FloatObject + +PyFloatObjectStruct = 
lltype.ForwardReference() +PyFloatObject = lltype.Ptr(PyFloatObjectStruct) +PyFloatObjectFields = PyObjectFields + \ + (("ob_fval", rffi.DOUBLE),) +cpython_struct("PyFloatObject", PyFloatObjectFields, PyFloatObjectStruct) + + at bootstrap_function +def init_floatobject(space): + "Type description of PyFloatObject" + make_typedescr(space.w_float.layout.typedef, + basestruct=PyFloatObject.TO, + attach=float_attach, + realize=float_realize) + +def float_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyFloatObject with the given float object. The + value must not be modified. + """ + py_float = rffi.cast(PyFloatObject, py_obj) + py_float.c_ob_fval = space.float_w(w_obj) + +def float_realize(space, obj): + floatval = rffi.cast(lltype.Float, rffi.cast(PyFloatObject, obj).c_ob_fval) + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(W_FloatObject, w_type) + w_obj.__init__(floatval) + track_reference(space, obj, w_obj) + return w_obj PyFloat_Check, PyFloat_CheckExact = build_type_checkers("Float") diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -120,9 +120,11 @@ #include "intobject.h" #include "longobject.h" #include "listobject.h" +#include "longobject.h" #include "unicodeobject.h" #include "compile.h" #include "frameobject.h" +#include "memoryobject.h" #include "eval.h" #include "pymem.h" #include "pycobject.h" diff --git a/pypy/module/cpyext/include/bytesobject.h b/pypy/module/cpyext/include/bytesobject.h --- a/pypy/module/cpyext/include/bytesobject.h +++ b/pypy/module/cpyext/include/bytesobject.h @@ -1,5 +1,4 @@ - -/* String object interface */ +/* A copy of pypy2's PyStringObject */ #ifndef Py_BYTESOBJECT_H #define Py_BYTESOBJECT_H @@ -7,15 +6,61 @@ extern "C" { #endif +#include + #define PyBytes_GET_SIZE(op) PyBytes_Size(op) #define PyBytes_AS_STRING(op) PyBytes_AsString(op) +/* 
+Type PyStringObject represents a character string. An extra zero byte is +reserved at the end to ensure it is zero-terminated, but a size is +present so strings with null bytes in them can be represented. This +is an immutable object type. + +There are functions to create new string objects, to test +an object for string-ness, and to get the +string value. The latter function returns a null pointer +if the object is not of the proper type. +There is a variant that takes an explicit size as well as a +variant that assumes a zero-terminated string. Note that none of the +functions should be applied to nil objects. +*/ + +/* Caching the hash (ob_shash) saves recalculation of a string's hash value. + Interning strings (ob_sstate) tries to ensure that only one string + object with a given value exists, so equality tests can be one pointer + comparison. This is generally restricted to strings that "look like" + Python identifiers, although the intern() builtin can be used to force + interning of any string. + Together, these sped cpython up by up to 20%, and since they are part of the + "public" interface PyPy must reimpliment them. */ + + + typedef struct { - PyObject_HEAD - char* buffer; - Py_ssize_t size; + PyObject_VAR_HEAD + long ob_shash; + int ob_sstate; + char * buffer; /* change the name from cpython so all non-api c access is thwarted */ + + /* Invariants + * (not relevant in PyPy, all stringobjects are backed by a pypy object) + * buffer contains space for 'ob_size+1' elements. + * buffer[ob_size] == 0. + * ob_shash is the hash of the string or -1 if not computed yet. + * ob_sstate != 0 iff the string object is in stringobject.c's + * 'interned' dictionary; in this case the two references + * from 'interned' to this object are *not counted* in ob_refcnt. 
+ */ + } PyBytesObject; +#define SSTATE_NOT_INTERNED 0 +#define SSTATE_INTERNED_MORTAL 1 +#define SSTATE_INTERNED_IMMORTAL 2 +#define PyString_CHECK_INTERNED(op) (((PyStringObject *)(op))->ob_sstate) + + #define PyByteArray_Check(obj) \ PyObject_IsInstance(obj, (PyObject *)&PyByteArray_Type) diff --git a/pypy/module/cpyext/include/complexobject.h b/pypy/module/cpyext/include/complexobject.h --- a/pypy/module/cpyext/include/complexobject.h +++ b/pypy/module/cpyext/include/complexobject.h @@ -6,14 +6,16 @@ extern "C" { #endif -/* fake PyComplexObject so that code that doesn't do direct field access works */ -#define PyComplexObject PyObject - typedef struct Py_complex_t { double real; double imag; } Py_complex; +typedef struct { + PyObject_HEAD + Py_complex cval; +} PyComplexObject; + /* generated function */ PyAPI_FUNC(int) _PyComplex_AsCComplex(PyObject *, Py_complex *); PyAPI_FUNC(PyObject *) _PyComplex_FromCComplex(Py_complex *); diff --git a/pypy/module/cpyext/include/datetime.h b/pypy/module/cpyext/include/datetime.h --- a/pypy/module/cpyext/include/datetime.h +++ b/pypy/module/cpyext/include/datetime.h @@ -24,6 +24,18 @@ PyObject_HEAD } PyDateTime_Delta; +typedef struct { + PyObject_HEAD +} PyDateTime_Date; + +typedef struct { + PyObject_HEAD +} PyDateTime_Time; + +typedef struct { + PyObject_HEAD +} PyDateTime_DateTime; + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/include/descrobject.h b/pypy/module/cpyext/include/descrobject.h --- a/pypy/module/cpyext/include/descrobject.h +++ b/pypy/module/cpyext/include/descrobject.h @@ -12,4 +12,34 @@ } PyGetSetDef; +#define PyDescr_COMMON \ + PyObject_HEAD \ + PyTypeObject *d_type; \ + PyObject *d_name + +typedef struct { + PyDescr_COMMON; +} PyDescrObject; + +typedef struct { + PyDescr_COMMON; + PyMethodDef *d_method; +} PyMethodDescrObject; + +typedef struct { + PyDescr_COMMON; + struct PyMemberDef *d_member; +} PyMemberDescrObject; + +typedef struct { + PyDescr_COMMON; + PyGetSetDef *d_getset; +} 
PyGetSetDescrObject; + +typedef struct { + PyDescr_COMMON; + struct wrapperbase *d_base; + void *d_wrapped; /* This can be any function pointer */ +} PyWrapperDescrObject; + #endif diff --git a/pypy/module/cpyext/include/floatobject.h b/pypy/module/cpyext/include/floatobject.h --- a/pypy/module/cpyext/include/floatobject.h +++ b/pypy/module/cpyext/include/floatobject.h @@ -3,10 +3,22 @@ #ifndef Py_FLOATOBJECT_H #define Py_FLOATOBJECT_H + +#ifdef _MSC_VER +#include +#include +#define copysign _copysign +#endif + #ifdef __cplusplus extern "C" { #endif +typedef struct { + PyObject_HEAD + double ob_fval; +} PyFloatObject; + #define PyFloat_STR_PRECISION 12 #ifdef Py_NAN diff --git a/pypy/module/cpyext/include/longobject.h b/pypy/module/cpyext/include/longobject.h --- a/pypy/module/cpyext/include/longobject.h +++ b/pypy/module/cpyext/include/longobject.h @@ -1,12 +1,20 @@ - -/* Int object interface */ - #ifndef Py_LONGOBJECT_H #define Py_LONGOBJECT_H + +#include + #ifdef __cplusplus extern "C" { #endif +/* why does cpython redefine these, and even supply an implementation in mystrtoul.c? 
+PyAPI_FUNC(unsigned long) PyOS_strtoul(const char *, char **, int); +PyAPI_FUNC(long) PyOS_strtol(const char *, char **, int); +*/ + +#define PyOS_strtoul strtoul +#define PyOS_strtol strtoul + #define PyLong_AS_LONG(op) PyLong_AsLong(op) #ifdef __cplusplus diff --git a/pypy/module/cpyext/include/memoryobject.h b/pypy/module/cpyext/include/memoryobject.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/memoryobject.h @@ -0,0 +1,14 @@ +#ifndef Py_MEMORYOBJECT_H +#define Py_MEMORYOBJECT_H + +#ifdef __cplusplus +extern "C" { +#endif + + + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_MEMORYOBJECT_H */ diff --git a/pypy/module/cpyext/include/numpy/README b/pypy/module/cpyext/include/numpy/README new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/README @@ -0,0 +1,8 @@ +headers for the micronumpy multiarray and umath modules, +as used by https://bitbucket.org/pypy/numpy. They are needed by +downstream packages that depend on numpy, like matplotlib, but can +be slightly non-compatible with traditional numpy C-API use cases. + +The trick to including these headers is in get_include, located in +numpy/lib/utils.py. They will be ignored by an upstream build of numpy +since the /numpy/core/include path will be used instead diff --git a/pypy/module/cpyext/include/numpy/__multiarray_api.h b/pypy/module/cpyext/include/numpy/__multiarray_api.h new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/include/numpy/__multiarray_api.h @@ -0,0 +1,11 @@ + + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +static int import_array(){return 0;}; +static int _import_array(){return 0;}; +static int _import_math(){return 0;}; + diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -1,6 +1,8 @@ -/* NDArray object interface - S. H. 
Muller, 2013/07/26 */ -/* For testing ndarrayobject only */ +/* NDArray object interface - S. H. Muller, 2013/07/26 + * It will be copied by numpy/core/setup.py by install_data to + * site-packages/numpy/core/includes/numpy +*/ #ifndef Py_NDARRAYOBJECT_H #define Py_NDARRAYOBJECT_H @@ -8,8 +10,14 @@ extern "C" { #endif +#include "pypy_numpy.h" +#include "old_defines.h" #include "npy_common.h" -#include "ndarraytypes.h" +#include "__multiarray_api.h" + +#define NPY_UNUSED(x) x +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) /* fake PyArrayObject so that code that doesn't do direct field access works */ #define PyArrayObject PyObject @@ -17,20 +25,206 @@ PyAPI_DATA(PyTypeObject) PyArray_Type; -#define PyArray_SimpleNew _PyArray_SimpleNew -#define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto -#define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define NPY_MAXDIMS 32 -/* functions defined in ndarrayobject.c*/ +#ifndef NDARRAYTYPES_H +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +/* data types copied from numpy/ndarraytypes.h + * keep numbers in sync with micronumpy.interp_dtype.DTypeCache + */ +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. 
+ */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21 +}; + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyArray_ISBOOL(arr) (PyTypeNum_ISBOOL(PyArray_TYPE(arr))) +#define PyArray_ISINTEGER(arr) (PyTypeNum_ISINTEGER(PyArray_TYPE(arr))) +#define PyArray_ISFLOAT(arr) (PyTypeNum_ISFLOAT(PyArray_TYPE(arr))) +#define PyArray_ISCOMPLEX(arr) (PyTypeNum_ISCOMPLEX(PyArray_TYPE(arr))) + + +/* flags */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 +#define NPY_ARRAY_OWNDATA 0x0004 +#define NPY_ARRAY_FORCECAST 0x0010 +#define NPY_ARRAY_ENSURECOPY 0x0020 +#define NPY_ARRAY_ENSUREARRAY 0x0040 +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 +#define NPY_ARRAY_ALIGNED 0x0100 +#define NPY_ARRAY_NOTSWAPPED 0x0200 +#define NPY_ARRAY_WRITEABLE 0x0400 +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY 
(NPY_ARRAY_CARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_UPDATEIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +#define NPY_FARRAY NPY_ARRAY_FARRAY +#define NPY_CARRAY NPY_ARRAY_CARRAY + +#define PyArray_CHKFLAGS(m, flags) (PyArray_FLAGS(m) & (flags)) + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + +#define PyArray_ISONESEGMENT(arr) (1) +#define PyArray_ISNOTSWAPPED(arr) (1) +#define PyArray_ISBYTESWAPPED(arr) (0) + +#endif + +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX32 NPY_CFLOAT +#define NPY_COMPLEX64 NPY_CDOUBLE + + +/* functions */ +#ifndef PyArray_NDIM + +#define PyArray_Check _PyArray_Check +#define PyArray_CheckExact 
_PyArray_CheckExact +#define PyArray_FLAGS _PyArray_FLAGS + +#define PyArray_NDIM _PyArray_NDIM +#define PyArray_DIM _PyArray_DIM +#define PyArray_STRIDE _PyArray_STRIDE +#define PyArray_SIZE _PyArray_SIZE +#define PyArray_ITEMSIZE _PyArray_ITEMSIZE +#define PyArray_NBYTES _PyArray_NBYTES +#define PyArray_TYPE _PyArray_TYPE +#define PyArray_DATA _PyArray_DATA + +#define PyArray_Size PyArray_SIZE +#define PyArray_BYTES(arr) ((char *)PyArray_DATA(arr)) + +#define PyArray_FromAny _PyArray_FromAny +#define PyArray_FromObject _PyArray_FromObject +#define PyArray_ContiguousFromObject PyArray_FromObject +#define PyArray_ContiguousFromAny PyArray_FromObject + +#define PyArray_FROMANY(obj, typenum, min, max, requirements) (obj) +#define PyArray_FROM_OTF(obj, typenum, requirements) \ + PyArray_FromObject(obj, typenum, 0, 0) + +#define PyArray_New _PyArray_New +#define PyArray_SimpleNew _PyArray_SimpleNew +#define PyArray_SimpleNewFromData _PyArray_SimpleNewFromData +#define PyArray_SimpleNewFromDataOwning _PyArray_SimpleNewFromDataOwning + +#define PyArray_EMPTY(nd, dims, type_num, fortran) \ + PyArray_SimpleNew(nd, dims, type_num) PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); -PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); +#define PyArray_FILLWBYTE _PyArray_FILLWBYTE +#define PyArray_ZEROS _PyArray_ZEROS +#define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) + +/* Don't use these in loops! 
*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0))) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1))) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1) + \ + (k)*PyArray_STRIDE(obj,2))) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDE(obj,0) + \ + (j)*PyArray_STRIDE(obj,1) + \ + (k)*PyArray_STRIDE(obj,2) + \ + (l)*PyArray_STRIDE(obj,3))) + +#endif #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/numpy/ndarraytypes.h b/pypy/module/cpyext/include/numpy/ndarraytypes.h --- a/pypy/module/cpyext/include/numpy/ndarraytypes.h +++ b/pypy/module/cpyext/include/numpy/ndarraytypes.h @@ -1,9 +1,69 @@ #ifndef NDARRAYTYPES_H #define NDARRAYTYPES_H -/* For testing ndarrayobject only */ +#include "numpy/npy_common.h" +//#include "npy_endian.h" +//#include "npy_cpu.h" +//#include "utils.h" -#include "numpy/npy_common.h" +//for pypy - numpy has lots of typedefs +//for pypy - make life easier, less backward support +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_NO_DEPRECATED_API NPY_1_8_API_VERSION +#undef NPY_1_8_API_VERSION + +#define NPY_ENABLE_SEPARATE_COMPILATION 1 +#define NPY_VISIBILITY_HIDDEN + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN +#else + #define NPY_NO_EXPORT static +#endif + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + + + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. 
+ * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version. This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. 
+ */ +#define NPY_FEATURE_VERSION NPY_API_VERSION enum NPY_TYPES { NPY_BOOL=0, NPY_BYTE, NPY_UBYTE, @@ -31,6 +91,18 @@ NPY_NTYPES_ABI_COMPATIBLE=21 }; +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + /* * These characters correspond to the array type and the struct * module @@ -85,6 +157,27 @@ }; typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2 +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_MERGESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0, +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { NPY_NOSCALAR=-1, NPY_BOOL_SCALAR, NPY_INTPOS_SCALAR, @@ -93,6 +186,7 @@ NPY_COMPLEX_SCALAR, NPY_OBJECT_SCALAR } NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) /* For specifying array memory layout or iteration order */ typedef enum { @@ -106,6 +200,729 @@ NPY_KEEPORDER=2 } NPY_ORDER; +/* For specifying allowed casting in operations which support it */ +typedef enum { + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, + + /* + * Temporary internal definition only, will be removed in upcoming + * release, see below + * */ + NPY_INTERNAL_UNSAFE_CASTING_BUT_WARN_UNLESS_SAME_KIND = 100, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length 
of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1) + +typedef enum { + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10,/* nanoseconds */ + NPY_FR_ps = 11,/* picoseconds */ + NPY_FR_fs = 12,/* femtoseconds */ + NPY_FR_as = 13,/* attoseconds */ + NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. */ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. 
*/ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. + ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + + /* + * Macros to define how array, and dimension/strides data is + * allocated. 
+ */ + + /* Data buffer - PyDataMem_NEW/FREE/RENEW are in multiarraymodule.c */ + +#define NPY_USE_PYMEM 1 + +#if NPY_USE_PYMEM == 1 +#define PyArray_malloc PyMem_Malloc +#define PyArray_free PyMem_Free +#define PyArray_realloc PyMem_Realloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); +typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); +typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp, + npy_intp *, npy_intp *, + void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. 
Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. 
*/ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD From pypy.commits at gmail.com Thu Apr 28 02:24:08 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 27 Apr 2016 23:24:08 -0700 (PDT) Subject: [pypy-commit] pypy share-mapdict-methods: fix problems with __del__: only make a single RPython subclass if the base Message-ID: <5721ac88.26b0c20a.cb528.ffff8c4b@mx.google.com> Author: Carl Friedrich Bolz Branch: share-mapdict-methods Changeset: r84000:dafa64985e85 Date: 2016-04-28 09:23 +0300 http://bitbucket.org/pypy/pypy/changeset/dafa64985e85/ Log: fix problems with __del__: only make a single RPython subclass if the base class already has a del diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- 
a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -383,6 +383,26 @@ assert not hasattr(b, "storage") assert hasattr(c, "storage") + def test_del(self): + space = self.space + a, b, c, d = space.unpackiterable(space.appexec([], """(): + class A(object): + pass + class B(object): + def __del__(self): + pass + class F(file): + pass + class G(file): + def __del__(self): + pass + return A(), B(), F("xyz", "w"), G("ghi", "w") + """)) + assert type(b).__base__ is type(a) + assert hasattr(c, "__del__") + assert type(d) is type(c) + + class AppTestTypeDef: def setup_class(cls): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -112,12 +112,18 @@ try: return _subclass_cache[key] except KeyError: - # XXX can save a class if cls already has a __del__ - if needsdel: + keys = [key] + base_has_del = hasattr(cls, '__del__') + if base_has_del: + # if the base has a __del__, we only need one class + keys = [(config, cls, True), (config, cls, False)] + needsdel = True + elif needsdel: cls = get_unique_interplevel_subclass(config, cls, False) subcls = _getusercls(config, cls, needsdel) assert key not in _subclass_cache - _subclass_cache[key] = subcls + for key in keys: + _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} @@ -130,17 +136,19 @@ MapdictDictSupport, _make_storage_mixin_size_n, MapdictStorageMixin) typedef = cls.typedef - name = cls.__name__ + "User" - - mixins_needed = [BaseUserClassMapdict] - if cls is W_ObjectObject or cls is W_InstanceObject: - mixins_needed.append(_make_storage_mixin_size_n()) - else: - mixins_needed.append(MapdictStorageMixin) - if reallywantdict or not typedef.hasdict: - # the type has no dict, mapdict to provide the dict - mixins_needed.append(MapdictDictSupport) - name += "Dict" + mixins_needed = [] + name = cls.__name__ + if not 
cls.user_overridden_class: + mixins_needed.append(BaseUserClassMapdict) + name += "User" + if cls is W_ObjectObject or cls is W_InstanceObject: + mixins_needed.append(_make_storage_mixin_size_n()) + else: + mixins_needed.append(MapdictStorageMixin) + if reallywantdict or not typedef.hasdict: + # the type has no dict, mapdict to provide the dict + mixins_needed.append(MapdictDictSupport) + name += "Dict" if wants_del: name += "Del" parent_destructor = getattr(cls, '__del__', None) diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -197,7 +197,7 @@ self.cls_without_del = _getusercls( space.config, W_InstanceObject, False, reallywantdict=True) self.cls_with_del = _getusercls( - space.config, W_InstanceObject, True, reallywantdict=True) + space.config, self.cls_without_del, True, reallywantdict=True) def class_descr_call(space, w_self, __args__): From pypy.commits at gmail.com Thu Apr 28 03:45:53 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 28 Apr 2016 00:45:53 -0700 (PDT) Subject: [pypy-commit] pypy default: I'm really often hitting this issue where I get Message-ID: <5721bfb1.22d8c20a.8d802.ffffa103@mx.google.com> Author: Armin Rigo Branch: Changeset: r84001:9d9245696120 Date: 2016-04-28 09:45 +0200 http://bitbucket.org/pypy/pypy/changeset/9d9245696120/ Log: I'm really often hitting this issue where I get AttributeError: install_layout in __getattr__(). I think it might be caused by some Debian-ism but I'm not sure what is going on, because I keep getting it on a fresh install of the official pypy's. I usually fix it with this "except AttributeError", which I'm now checking in. 
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py --- a/lib-python/2.7/distutils/cmd.py +++ b/lib-python/2.7/distutils/cmd.py @@ -298,8 +298,11 @@ src_cmd_obj.ensure_finalized() for (src_option, dst_option) in option_pairs: if getattr(self, dst_option) is None: - setattr(self, dst_option, - getattr(src_cmd_obj, src_option)) + try: + setattr(self, dst_option, + getattr(src_cmd_obj, src_option)) + except AttributeError: + pass def get_finalized_command(self, command, create=1): From pypy.commits at gmail.com Thu Apr 28 04:23:58 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 28 Apr 2016 01:23:58 -0700 (PDT) Subject: [pypy-commit] pypy default: Add comments Message-ID: <5721c89e.8a37c20a.31ded.ffffb947@mx.google.com> Author: Armin Rigo Branch: Changeset: r84002:b64c8f09635f Date: 2016-04-28 10:23 +0200 http://bitbucket.org/pypy/pypy/changeset/b64c8f09635f/ Log: Add comments diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py --- a/lib-python/2.7/distutils/cmd.py +++ b/lib-python/2.7/distutils/cmd.py @@ -302,6 +302,11 @@ setattr(self, dst_option, getattr(src_cmd_obj, src_option)) except AttributeError: + # This was added after problems with setuptools 18.4. + # It seems that setuptools 20.9 fixes the problem. + # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv + # if I say "virtualenv -p pypy venv-pypy" then it + # just installs setuptools 18.4 from some cache... 
pass From pypy.commits at gmail.com Thu Apr 28 04:52:17 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 28 Apr 2016 01:52:17 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: add a talk Message-ID: <5721cf41.81da1c0a.6564a.7611@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r5638:8dca1d8207b6 Date: 2016-04-28 09:50 +0300 http://bitbucket.org/pypy/extradoc/changeset/8dca1d8207b6/ Log: add a talk diff --git a/talk/pycon-il-2016/Makefile b/talk/pycon-il-2016/Makefile new file mode 100644 --- /dev/null +++ b/talk/pycon-il-2016/Makefile @@ -0,0 +1,13 @@ +# pip install rst2beamer + +# WARNING: to work, it needs this patch for docutils +# https://sourceforge.net/tracker/?func=detail&atid=422032&aid=1459707&group_id=38414 + +talk.pdf: talk.tex + pdflatex talk.tex + + +talk.tex: talk.rst author.latex stylesheet.latex + rst2beamer --stylesheet=stylesheet.latex --documentoptions=14pt --input-encoding=utf8 --output-encoding=utf8 $< > talk.tex + sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit + diff --git a/talk/pycon-il-2016/author.latex b/talk/pycon-il-2016/author.latex new file mode 100644 --- /dev/null +++ b/talk/pycon-il-2016/author.latex @@ -0,0 +1,9 @@ +\definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} + +\title[PyPy 2016]{PyPy 2016} +\author[mattip] +{Matti Picus\\ +\includegraphics[width=80px]{../img/py-web-new.png}} + +\institute{Scipy Israel 2016} +\date{May 2nd, 2016} diff --git a/talk/pycon-il-2016/beamerdefs.txt b/talk/pycon-il-2016/beamerdefs.txt new file mode 100644 --- /dev/null +++ b/talk/pycon-il-2016/beamerdefs.txt @@ -0,0 +1,108 @@ +.. colors +.. =========================== + +.. role:: green +.. role:: red + + +.. general useful commands +.. =========================== + +.. |pause| raw:: latex + + \pause + +.. |small| raw:: latex + + {\small + +.. |end_small| raw:: latex + + } + +.. |scriptsize| raw:: latex + + {\scriptsize + +.. |end_scriptsize| raw:: latex + + } + +.. |strike<| raw:: latex + + \sout{ + +.. 
closed bracket +.. =========================== + +.. |>| raw:: latex + + } + + +.. example block +.. =========================== + +.. |example<| raw:: latex + + \begin{exampleblock}{ + + +.. |end_example| raw:: latex + + \end{exampleblock} + + + +.. alert block +.. =========================== + +.. |alert<| raw:: latex + + \begin{alertblock}{ + + +.. |end_alert| raw:: latex + + \end{alertblock} + + + +.. columns +.. =========================== + +.. |column1| raw:: latex + + \begin{columns} + \begin{column}{0.45\textwidth} + +.. |column2| raw:: latex + + \end{column} + \begin{column}{0.45\textwidth} + + +.. |end_columns| raw:: latex + + \end{column} + \end{columns} + + + +.. |snake| image:: ../../img/py-web-new.png + :scale: 15% + + + +.. nested blocks +.. =========================== + +.. |nested| raw:: latex + + \begin{columns} + \begin{column}{0.85\textwidth} + +.. |end_nested| raw:: latex + + \end{column} + \end{columns} diff --git a/talk/pycon-il-2016/speed.png b/talk/pycon-il-2016/speed.png new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..63b771ce59358bbcb28efbba84a43f03328b4554 GIT binary patch [cut] diff --git a/talk/pycon-il-2016/stylesheet.latex b/talk/pycon-il-2016/stylesheet.latex new file mode 100644 --- /dev/null +++ b/talk/pycon-il-2016/stylesheet.latex @@ -0,0 +1,9 @@ +\setbeamercovered{transparent} +\setbeamertemplate{navigation symbols}{} + +\definecolor{darkgreen}{rgb}{0, 0.5, 0.0} +\newcommand{\docutilsrolegreen}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\docutilsrolered}[1]{\color{red}#1\normalcolor} + +\newcommand{\green}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\red}[1]{\color{red}#1\normalcolor} diff --git a/talk/pycon-il-2016/talk.rst b/talk/pycon-il-2016/talk.rst new file mode 100644 --- /dev/null +++ b/talk/pycon-il-2016/talk.rst @@ -0,0 +1,252 @@ +.. 
include:: beamerdefs.txt + +========= +PyPy 2016 +========= + +Introduction +------------ + +Python is + +* A syntax or two + +* An interpreter + +* A set of standard libraries shipped with the interpreter + +* A vibrant number of communitis that shares code + +PyPy +---- + +* PyPy is an interpreter written in RPython + +* Speed is one of its main advantages + +* Compatible (mostly) + +Speed +----- + +.. image:: speed.png + :scale: 50% + :align: center + +How ? +----- + +* Tracing Just-In-Time compiler + +* Optimizes loops + +* Traces one iteration of a loop + +* Produces a linear trace of execution + +* Inlines almost everything + +* The trace is then optimized and compiled + +* Removes overhead + +Prove It +-------- + +Techniques to achieve performant Python +--------------------------------------- + +* Write better code + + - string concatenation + + - attribute lookup + +* Rewrite your code in C + +* Rewrite your code in Cython + +* Add accelators like Numba + +* Use PyPy + +Why not PyPy? +------------- + +* Python III + +* Third-party library support + +PyPy and C (1/2) +---------------- + +* PyPy and CFFI + +* CFFI is the easiest tool to I've used so far + +* Very fast on PyPy, fast enough on CPython + +* Used by NumPyPy + +* Use CFFI to call python from C + + - This means you can create your own C API in pure Python ! 
+ +PyPy and C (2/2) +---------------- + +* CFFI enables embedded Python (and PyPy) in a C application (uWSGI) + +* What about C-API (glad you asked) + +* Actively worked on right now + +Python C API +------------ + +* Leaks way too many implementation details (refcounting, PyObject structure fields) + +* Makes it hard to improve Python while supporting 100% of the API + +* PyPy 5.0 introduced a major rewrite + +* Hint - good things are coming + +NumPyPy +------- + +* https://bitbucket.org/pypy/numpy + pypy + +* I have been working on it since 2011 + +* Replaces ndarray, umath with builtin modules + +* ~85% of the numpy tests are passing, on all platforms + +* Most of numpy is there: object dtypes, ufuncs + +* linalg, fft, random all via cffi + +NumPyPy performance +------------------- + +* Should be as fast as Numpy, faster for smaller arrays + +* Lazy evaluation ? + +* But what about SciPy? + +PyMetabiosis +------------ + +* https://github.com/rguillebert/pymetabiosis + +* Work in progress + +* Allows you to use any CPython module on PyPy (scipy for example) + +* Embeds CPython into PyPy with CFFI + +* Numpy arrays can be shared between PyPy and CPython + +PyMetabiosis +------------ + +|scriptsize| + +.. sourcecode:: python + + from pymetabiosis import import_module + + cpython_virtualenv_path = + "/tmp/venv/bin/activate_this.py" + + builtin = import_module("__builtin__") + + # Activate a virtualenv for the cpython interpreter + builtin.execfile(cpython_virtualenv_path, + {"__file__" : cpython_virtualenv_path} + ) + + pylab = import_module("matplotlib.pylab") + + pylab.plot([1, 2, 3, 4]) + pylab.show() + +|end_scriptsize| + +JitPy +----- + +* http://jitpy.readthedocs.io + +* Proof of concept (Maciej Fijałkowski) + +* Embeds PyPy into CPython + +* Provides a decorator that allows you to run specific functions on PyPy + +* Is used the same way as numba, but different performance characteristics + + +JitPy +----- + +|scriptsize| + +.. 
sourcecode:: python + + import numpy as np + from jitpy import setup + setup('') + from jitpy.wrapper import jittify + + @jittify(['array', float], float) + def f(a, s): + r = 0 + for i in xrange(a.shape[0]): + r += a[i] * s + return s + func(np.arange(10000), 1.2) + +|end_scriptsize| + +Future - wouldn't it be great if +-------------------------------- + +* Improved C extension compatibility + +* Native Numpy + Scipy + ... + + +The Future is Now! +------------------ + +* (Applause) + +* Native numpy (tweaked) passes 90% of tests + +* How to leverage the JIT? + +Why this makes sense +-------------------- + +* Advantages and disadvantages of RPython + +* Advantages of a JIT (vectorization) + +* Leveraging this for other dynamic languages + +Takeaway +-------- + +* Get PyPy at pypy.org (or from your favorite distribution) + +* Try it + +* Give us feedback (good or bad) + +Thank You +--------- + +Questions ? From pypy.commits at gmail.com Thu Apr 28 04:52:19 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 28 Apr 2016 01:52:19 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: add pdf Message-ID: <5721cf43.a82cc20a.faa2b.ffffc610@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r5639:392a67457b7d Date: 2016-04-28 09:52 +0300 http://bitbucket.org/pypy/extradoc/changeset/392a67457b7d/ Log: add pdf diff --git a/talk/pycon-il-2016/Makefile b/talk/pycon-il-2016/Makefile --- a/talk/pycon-il-2016/Makefile +++ b/talk/pycon-il-2016/Makefile @@ -8,6 +8,6 @@ talk.tex: talk.rst author.latex stylesheet.latex - rst2beamer --stylesheet=stylesheet.latex --documentoptions=14pt --input-encoding=utf8 --output-encoding=utf8 $< > talk.tex + rst2beamer --stylesheet=stylesheet.latex --documentoptions=14pt --input-encoding=utf8 --output-encoding=utf8 $< > talk.latex sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit diff --git a/talk/pycon-il-2016/talk.pdf b/talk/pycon-il-2016/talk.pdf new file mode 100644 index 
e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..27f0b7ce06dde8f973e1eb3c65f491c87a2d1604 GIT binary patch [cut] From pypy.commits at gmail.com Thu Apr 28 04:52:21 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 28 Apr 2016 01:52:21 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: update Message-ID: <5721cf45.46291c0a.e3346.4d2c@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r5640:e8ac394f3c2e Date: 2016-04-28 11:51 +0300 http://bitbucket.org/pypy/extradoc/changeset/e8ac394f3c2e/ Log: update diff --git a/talk/pycon-il-2016/Makefile b/talk/pycon-il-2016/Makefile --- a/talk/pycon-il-2016/Makefile +++ b/talk/pycon-il-2016/Makefile @@ -1,13 +1,10 @@ # pip install rst2beamer -# WARNING: to work, it needs this patch for docutils -# https://sourceforge.net/tracker/?func=detail&atid=422032&aid=1459707&group_id=38414 +talk.pdf: talk.latex + pdflatex talk.latex -talk.pdf: talk.tex - pdflatex talk.tex - -talk.tex: talk.rst author.latex stylesheet.latex - rst2beamer --stylesheet=stylesheet.latex --documentoptions=14pt --input-encoding=utf8 --output-encoding=utf8 $< > talk.latex +talk.latex: talk.rst author.latex stylesheet.latex *.png + rst2beamer --stylesheet=stylesheet.latex --documentoptions=12pt --input-encoding=utf8 --output-encoding=utf8 $< > talk.latex sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit diff --git a/talk/pycon-il-2016/ndarray.png b/talk/pycon-il-2016/ndarray.png new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..30c7c55cd501c36f9d322596b3a481c0de63a9fc GIT binary patch [cut] diff --git a/talk/pycon-il-2016/speed.png b/talk/pycon-il-2016/speed.png index 63b771ce59358bbcb28efbba84a43f03328b4554..d4a72a103291365508bc5813d04a6acbc1d9f402 GIT binary patch [cut] diff --git a/talk/pycon-il-2016/talk.pdf b/talk/pycon-il-2016/talk.pdf index 27f0b7ce06dde8f973e1eb3c65f491c87a2d1604..51c1443ef77403e7affefca9ed3b4d748cf0c7c2 GIT binary patch [cut] diff --git a/talk/pycon-il-2016/talk.rst 
b/talk/pycon-il-2016/talk.rst --- a/talk/pycon-il-2016/talk.rst +++ b/talk/pycon-il-2016/talk.rst @@ -4,54 +4,18 @@ PyPy 2016 ========= -Introduction ------------- +Python means four things: +------------------------- Python is -* A syntax or two +* A syntax or six `(2 * 3 = 6)` -* An interpreter +* An interpreter to run code written in the syntax * A set of standard libraries shipped with the interpreter -* A vibrant number of communitis that shares code - -PyPy ----- - -* PyPy is an interpreter written in RPython - -* Speed is one of its main advantages - -* Compatible (mostly) - -Speed ------ - -.. image:: speed.png - :scale: 50% - :align: center - -How ? ------ - -* Tracing Just-In-Time compiler - -* Optimizes loops - -* Traces one iteration of a loop - -* Produces a linear trace of execution - -* Inlines almost everything - -* The trace is then optimized and compiled - -* Removes overhead - -Prove It --------- +* A vibrant number of communities that share code Techniques to achieve performant Python --------------------------------------- @@ -70,33 +34,100 @@ * Use PyPy +PyPy +---- + +* PyPy is an interpreter written in RPython + +* It ships with the standard library + +* Speed is one of its main advantages + +* Compatible (mostly) via pip install + +* Not the only alternative interpreter + +Speed (Applause) +---------------- + +.. image:: speed.png + :scale: 50% + :align: center + +Speed continued +--------------- + +* Benchmarking, statistics, politics + +* Did I mention warmup time? + +How ? +----- + +* Tracing Just-In-Time compiler + +* Optimizes loops + +* Traces one iteration of a loop + +* Produces a linear trace of execution + +* The trace is then **optimized** and compiled + +Why is this fast? +----------------- + +* Inlining + +* Promotion + +* Unrolling + +* Strategies + + - Convert sequences to arrays + + - Vectorization + +Prove It +-------- + +* profiling + +* jitviewer + Why not PyPy? 
------------- -* Python III +* Python and the community * Third-party library support -PyPy and C (1/2) +* No easy packaging (like Winpython or Anaconda) + + - Opportunity??? + +PyPy and C (1/3) ---------------- -* PyPy and CFFI +* PyPy and CFFI (Armin Rigo, Maciej Fijałkowski) -* CFFI is the easiest tool to I've used so far - -* Very fast on PyPy, fast enough on CPython - -* Used by NumPyPy +* CFFI is easy, just massage the headers and that's it * Use CFFI to call python from C - This means you can create your own C API in pure Python ! -PyPy and C (2/2) +PyPy and C (2/3) ---------------- * CFFI enables embedded Python (and PyPy) in a C application (uWSGI) +* Very fast on PyPy, fast enough on CPython + +PyPy and C (3/3) +---------------- + * What about C-API (glad you asked) * Actively worked on right now @@ -106,6 +137,8 @@ * Leaks way too many implementation details (refcounting, PyObject structure fields) +* C allows you to cheat (private, read-only) + * Makes it hard to improve Python while supporting 100% of the API * PyPy 5.0 introduced a major rewrite @@ -117,7 +150,7 @@ * https://bitbucket.org/pypy/numpy + pypy -* I have been working on it since 2011 +* I have been working on it since 2011, together with many others * Replaces ndarray, umath with builtin modules @@ -126,11 +159,21 @@ * Most of numpy is there: object dtypes, ufuncs * linalg, fft, random all via cffi +* Should be as fast as Numpy, faster for smaller arrays NumPyPy performance ------------------- -* Should be as fast as Numpy, faster for smaller arrays +* From http://rpubs.com/mikefc/60129 + +.. image:: ndarray.png + :scale: 35% + :align: center + +* numpypy in blue, numpy in red + +NumPyPy future +-------------- * Lazy evaluation ? 
@@ -141,7 +184,7 @@ * https://github.com/rguillebert/pymetabiosis -* Work in progress +* Proof of concept (Romain Guillebert) * Allows you to use any CPython module on PyPy (scipy for example) @@ -226,12 +269,12 @@ * Native numpy (tweaked) passes 90% of tests -* How to leverage the JIT? +* How to leverage the JIT and NumPyPy? Why this makes sense -------------------- -* Advantages and disadvantages of RPython +* Advantages of RPython * Advantages of a JIT (vectorization) @@ -242,11 +285,21 @@ * Get PyPy at pypy.org (or from your favorite distribution) -* Try it +* Use it in a virtualenv -* Give us feedback (good or bad) +* Give us feedback (good or bad) #pypy on IRC -Thank You +Thank You --------- -Questions ? +Questions ? Examples: + +* What about this other interpreter I heard of? + +* How can I get involved? + +* What about commercial involvement? + +* How can I get support? + +* What about Python 3.5? From pypy.commits at gmail.com Thu Apr 28 05:03:20 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 28 Apr 2016 02:03:20 -0700 (PDT) Subject: [pypy-commit] pypy default: update TODO Message-ID: <5721d1d8.aaf0c20a.d8e6f.ffff8e38@mx.google.com> Author: Matti Picus Branch: Changeset: r84003:b31152f34254 Date: 2016-04-28 12:00 +0300 http://bitbucket.org/pypy/pypy/changeset/b31152f34254/ Log: update TODO diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -1,3 +1,2 @@ -* python setup.py install in numpy does not somehow tell setuptools - it's installed (I bet it's about the py27 tag) * reduce size of generated c code from slot definitions in slotdefs. +* remove broken DEBUG_REFCOUNT from pyobject.py From pypy.commits at gmail.com Thu Apr 28 05:12:36 2016 From: pypy.commits at gmail.com (amauryfa) Date: Thu, 28 Apr 2016 02:12:36 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Remove unused import, which causes an import loop in py3.5. 
Message-ID: <5721d404.e873c20a.f4bb.ffffd0f6@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r84004:1ae8011dde1c Date: 2016-04-28 11:11 +0200 http://bitbucket.org/pypy/pypy/changeset/1ae8011dde1c/ Log: Remove unused import, which causes an import loop in py3.5. diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -5,8 +5,6 @@ equivalent to x+y. ''' -import types - def countOf(a,b): 'countOf(a, b) -- Return the number of times b occurs in a.' From pypy.commits at gmail.com Thu Apr 28 05:28:18 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 28 Apr 2016 02:28:18 -0700 (PDT) Subject: [pypy-commit] pypy default: fix: operator.attrgetter("name", not_a_string) would raise a confusing Message-ID: <5721d7b2.4412c30a.fec01.ffffcf7e@mx.google.com> Author: Armin Rigo Branch: Changeset: r84005:cc64a16862ce Date: 2016-04-28 11:26 +0200 http://bitbucket.org/pypy/pypy/changeset/cc64a16862ce/ Log: fix: operator.attrgetter("name", not_a_string) would raise a confusing error message diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -72,16 +72,14 @@ class attrgetter(object): def __init__(self, attr, *attrs): - if ( - not isinstance(attr, basestring) or - not all(isinstance(a, basestring) for a in attrs) - ): - def _raise_typeerror(obj): - raise TypeError( - "argument must be a string, not %r" % type(attr).__name__ - ) - self._call = _raise_typeerror - elif attrs: + if not isinstance(attr, basestring): + self._error(attr) + return + if attrs: + for a in attrs: + if not isinstance(a, basestring): + self._error(a) + return self._multi_attrs = [ a.split(".") for a in [attr] + list(attrs) ] @@ -93,6 +91,13 @@ self._single_attr = attr.split(".") self._call = self._single_attrgetter + def _error(self, attr): + def 
_raise_typeerror(obj): + raise TypeError( + "attribute name must be a string, not %r" % type(attr).__name__ + ) + self._call = _raise_typeerror + def __call__(self, obj): return self._call(obj) diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -33,7 +33,8 @@ a.z = 'Z' assert operator.attrgetter('x','z','y')(a) == ('X', 'Z', 'Y') - raises(TypeError, operator.attrgetter('x', (), 'y'), a) + e = raises(TypeError, operator.attrgetter('x', (), 'y'), a) + assert str(e.value) == "attribute name must be a string, not 'tuple'" data = map(str, range(20)) assert operator.itemgetter(2,10,5)(data) == ('2', '10', '5') From pypy.commits at gmail.com Thu Apr 28 06:07:47 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 28 Apr 2016 03:07:47 -0700 (PDT) Subject: [pypy-commit] stmgc default: mention the status with gcc 6.1 Message-ID: <5721e0f3.c9b0c20a.e1f64.ffffe301@mx.google.com> Author: Armin Rigo Branch: Changeset: r1987:7bb97205b028 Date: 2016-04-28 12:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/7bb97205b028/ Log: mention the status with gcc 6.1 diff --git a/gcc-seg-gs/README.txt b/gcc-seg-gs/README.txt --- a/gcc-seg-gs/README.txt +++ b/gcc-seg-gs/README.txt @@ -1,3 +1,21 @@ + +========== CURRENT STATUS ========== + +gcc 6.1 supports '__seg_gs' out of the box. You should use this version +of gcc (or more recent). + +If you want, you can follow the instructions below to download and +compile the standard gcc. Of course, it is likely that gcc 6.1 will +soon be available from your Linux distribution directly. + +Note that with gcc 6.1, you no longer need gcc-5.1.0-patch.diff, and you +should not need the "-fno-*" options either. 
+ + + +========== OLDER INSTRUCTIONS ========== + + Get gcc release 5.1.0 from the download page: https://gcc.gnu.org/mirrors.html From pypy.commits at gmail.com Thu Apr 28 06:10:30 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 28 Apr 2016 03:10:30 -0700 (PDT) Subject: [pypy-commit] stmgc default: Note that we *should* not need "-fno-*" but didn't really check so far Message-ID: <5721e196.d1981c0a.4053e.40ec@mx.google.com> Author: Armin Rigo Branch: Changeset: r1988:c2619b37c645 Date: 2016-04-28 12:10 +0200 http://bitbucket.org/pypy/stmgc/changeset/c2619b37c645/ Log: Note that we *should* not need "-fno-*" but didn't really check so far diff --git a/gcc-seg-gs/README.txt b/gcc-seg-gs/README.txt --- a/gcc-seg-gs/README.txt +++ b/gcc-seg-gs/README.txt @@ -9,7 +9,8 @@ soon be available from your Linux distribution directly. Note that with gcc 6.1, you no longer need gcc-5.1.0-patch.diff, and you -should not need the "-fno-*" options either. +should not need the "-fno-*" options either (but we didn't check that +yet). From pypy.commits at gmail.com Thu Apr 28 09:02:31 2016 From: pypy.commits at gmail.com (amauryfa) Date: Thu, 28 Apr 2016 06:02:31 -0700 (PDT) Subject: [pypy-commit] pypy default: Add PyDateTimeAPI->TZInfoType. Because it's easy Message-ID: <572209e7.0f801c0a.51e59.ffffa6f2@mx.google.com> Author: Amaury Forgeot d'Arc Branch: Changeset: r84006:6630090dc44c Date: 2016-04-28 15:00 +0200 http://bitbucket.org/pypy/pypy/changeset/6630090dc44c/ Log: Add PyDateTimeAPI->TZInfoType. 
Because it's easy diff --git a/pypy/module/cpyext/cdatetime.py b/pypy/module/cpyext/cdatetime.py --- a/pypy/module/cpyext/cdatetime.py +++ b/pypy/module/cpyext/cdatetime.py @@ -15,6 +15,7 @@ ('DateTimeType', PyTypeObjectPtr), ('TimeType', PyTypeObjectPtr), ('DeltaType', PyTypeObjectPtr), + ('TZInfoType', PyTypeObjectPtr), )) @cpython_api([], lltype.Ptr(PyDateTime_CAPI)) @@ -40,6 +41,10 @@ datetimeAPI.c_DeltaType = rffi.cast( PyTypeObjectPtr, make_ref(space, w_type)) + w_type = space.getattr(w_datetime, space.wrap("tzinfo")) + datetimeAPI.c_TZInfoType = rffi.cast( + PyTypeObjectPtr, make_ref(space, w_type)) + return datetimeAPI PyDateTime_DateStruct = lltype.ForwardReference() diff --git a/pypy/module/cpyext/include/datetime.h b/pypy/module/cpyext/include/datetime.h --- a/pypy/module/cpyext/include/datetime.h +++ b/pypy/module/cpyext/include/datetime.h @@ -11,6 +11,7 @@ PyTypeObject *DateTimeType; PyTypeObject *TimeType; PyTypeObject *DeltaType; + PyTypeObject *TZInfoType; } PyDateTime_CAPI; PyAPI_DATA(PyDateTime_CAPI*) PyDateTimeAPI; diff --git a/pypy/module/cpyext/test/test_datetime.py b/pypy/module/cpyext/test/test_datetime.py --- a/pypy/module/cpyext/test/test_datetime.py +++ b/pypy/module/cpyext/test/test_datetime.py @@ -82,11 +82,12 @@ PyErr_SetString(PyExc_RuntimeError, "No PyDateTimeAPI"); return NULL; } - return PyTuple_Pack(4, + return PyTuple_Pack(5, PyDateTimeAPI->DateType, PyDateTimeAPI->DateTimeType, PyDateTimeAPI->TimeType, - PyDateTimeAPI->DeltaType); + PyDateTimeAPI->DeltaType, + PyDateTimeAPI->TZInfoType); """), ("clear_types", "METH_NOARGS", """ @@ -94,6 +95,7 @@ Py_DECREF(PyDateTimeAPI->DateTimeType); Py_DECREF(PyDateTimeAPI->TimeType); Py_DECREF(PyDateTimeAPI->DeltaType); + Py_DECREF(PyDateTimeAPI->TZInfoType); Py_RETURN_NONE; """ ) @@ -102,5 +104,6 @@ assert module.get_types() == (datetime.date, datetime.datetime, datetime.time, - datetime.timedelta) + datetime.timedelta, + datetime.tzinfo) module.clear_types() From pypy.commits at gmail.com 
Thu Apr 28 09:21:02 2016 From: pypy.commits at gmail.com (amauryfa) Date: Thu, 28 Apr 2016 06:21:02 -0700 (PDT) Subject: [pypy-commit] pypy default: Define PyDateTime_TZInfo struct, and the corresponding Check() functions. Message-ID: <57220e3e.58811c0a.826cd.79bc@mx.google.com> Author: Amaury Forgeot d'Arc Branch: Changeset: r84007:9284beb465a0 Date: 2016-04-28 15:19 +0200 http://bitbucket.org/pypy/pypy/changeset/9284beb465a0/ Log: Define PyDateTime_TZInfo struct, and the corresponding Check() functions. diff --git a/pypy/module/cpyext/cdatetime.py b/pypy/module/cpyext/cdatetime.py --- a/pypy/module/cpyext/cdatetime.py +++ b/pypy/module/cpyext/cdatetime.py @@ -92,6 +92,7 @@ make_check_function("PyDate_Check", "date") make_check_function("PyTime_Check", "time") make_check_function("PyDelta_Check", "timedelta") +make_check_function("PyTZInfo_Check", "tzinfo") # Constructors diff --git a/pypy/module/cpyext/include/datetime.h b/pypy/module/cpyext/include/datetime.h --- a/pypy/module/cpyext/include/datetime.h +++ b/pypy/module/cpyext/include/datetime.h @@ -37,6 +37,10 @@ PyObject_HEAD } PyDateTime_DateTime; +typedef struct { + PyObject_HEAD +} PyDateTime_TZInfo; + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/test/test_datetime.py b/pypy/module/cpyext/test/test_datetime.py --- a/pypy/module/cpyext/test/test_datetime.py +++ b/pypy/module/cpyext/test/test_datetime.py @@ -72,6 +72,16 @@ date = datetime.datetime.fromtimestamp(0) assert space.unwrap(space.str(w_date)) == str(date) + def test_tzinfo(self, space, api): + w_tzinfo = space.appexec( + [], """(): + from datetime import tzinfo + return tzinfo() + """) + assert api.PyTZInfo_Check(w_tzinfo) + assert api.PyTZInfo_CheckExact(w_tzinfo) + assert not api.PyTZInfo_Check(space.w_None) + class AppTestDatetime(AppTestCpythonExtensionBase): def test_CAPI(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Thu Apr 28 09:34:32 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 28 
Apr 2016 06:34:32 -0700 (PDT) Subject: [pypy-commit] pypy share-mapdict-methods: seems it's wrong to share deldictvalue, for somewhat involved reasons: Message-ID: <57221168.2a18c20a.a67b2.4842@mx.google.com> Author: Carl Friedrich Bolz Branch: share-mapdict-methods Changeset: r84008:a1aab87118e0 Date: 2016-04-28 16:33 +0300 http://bitbucket.org/pypy/pypy/changeset/a1aab87118e0/ Log: seems it's wrong to share deldictvalue, for somewhat involved reasons: there are classes that manage their own dict, and overwrite getdictvalue, setdictvalue, expecting to use the default implementation of deldictvalue. if you make a subclass of such a class, the shared mapdict base implementation would do the wrong thing. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -49,8 +49,20 @@ return True return False - # deldictvalue, getdict, setdict are mixed in from basemapdictobject - # def deldictvalue(self, space, attrname): + def deldictvalue(self, space, attrname): + from pypy.interpreter.error import OperationError + # check whether it has a dict and use that + w_dict = self.getdict(space) + if w_dict is not None: + try: + space.delitem(w_dict, space.wrap(attrname)) + return True + except OperationError, ex: + if not ex.match(space, space.w_KeyError): + raise + return False + + # getdict, setdict are mixed in from basemapdictobject # def getdict(self, space): # def setdict(self, space, w_dict): diff --git a/pypy/objspace/std/basemapdictobject.py b/pypy/objspace/std/basemapdictobject.py --- a/pypy/objspace/std/basemapdictobject.py +++ b/pypy/objspace/std/basemapdictobject.py @@ -59,25 +59,6 @@ # getdictvalue and setdictvalue are not done here, for performance reasons - def deldictvalue(self, space, attrname): - from pypy.interpreter.error import OperationError - map = self._get_mapdict_map() - if map is None: - # check whether it has a dict and use that - w_dict = 
self.getdict(space) - if w_dict is not None: - try: - space.delitem(w_dict, space.wrap(attrname)) - return True - except OperationError, ex: - if not ex.match(space, space.w_KeyError): - raise - return False - new_obj = map.delete(self, attrname, DICT) - if new_obj is None: - return False - self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) - return True def getdict(self, space): from pypy.objspace.std.mapdict import MapDictStrategy @@ -175,6 +156,7 @@ def setweakref(self, space, weakreflifeline): from pypy.module._weakref.interp__weakref import WeakrefLifeline + from pypy.interpreter.error import oefmt map = self._get_mapdict_map() if map is None: # not a user-defined subclass diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -495,6 +495,14 @@ def setdictvalue(self, space, attrname, w_value): return self._get_mapdict_map().write(self, attrname, DICT, w_value) + def deldictvalue(self, space, attrname): + map = self._get_mapdict_map() + new_obj = map.delete(self, attrname, DICT) + if new_obj is None: + return False + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) + return True + class MapdictStorageMixin(object): def _mapdict_init_empty(self, map): From pypy.commits at gmail.com Thu Apr 28 10:48:41 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 28 Apr 2016 07:48:41 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: fix some tests Message-ID: <572222c9.8d1f1c0a.2b11a.0f9f@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r84009:4bb48bb31572 Date: 2016-04-28 15:47 +0100 http://bitbucket.org/pypy/pypy/changeset/4bb48bb31572/ Log: fix some tests diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -93,7 +93,7 @@ assert len(s) == 4 assert s == b'ab\x00c' - def 
test_string_tp_alloc(self): + def test_bytes_tp_alloc(self): module = self.import_extension('foo', [ ("tpalloc", "METH_NOARGS", """ @@ -117,7 +117,7 @@ """), ]) s = module.tpalloc() - assert s == '\x00' * 10 + assert s == b'\x00' * 10 def test_AsString(self): module = self.import_extension('foo', [ diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -257,8 +257,6 @@ def setup_class(cls): cls.space.getbuiltinmodule("cpyext") - from pypy.module.imp.importing import importhook - importhook(cls.space, "os") # warm up reference counts #state = cls.space.fromcache(RefcountState) ZZZ #state.non_heaptypes_w[:] = [] if not cls.runappdirect: diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -35,7 +35,7 @@ PyEval_InitThreads(); state0 = PyGILState_Ensure(); /* hangs here */ if (val != 0) - { + { state1 = PyGILState_Ensure(); PyGILState_Release(state1); } @@ -68,9 +68,8 @@ ]) assert module.get() == 3 + @py.test.mark.xfail(run=False, reason='PyThreadState_Get() segfaults on py3k and CPython') def test_basic_threadstate_dance(self): - if self.runappdirect: - py.test.xfail('segfault: on cpython cannot Get() a NULL tstate') module = self.import_extension('foo', [ ("dance", "METH_NOARGS", """ @@ -151,8 +150,8 @@ res = module.test() print "got", res assert res in (0, 1) - - + + class AppTestState(AppTestCpythonExtensionBase): def test_frame_tstate_tracing(self): diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -213,7 +213,7 @@ import re assert re.sre_compile._sre is module s = u"Foo " * 1000 + u"Bar" - prog = re.compile(ur"Foo.*Bar") + prog = 
re.compile("Foo.*Bar") assert prog.match(s) m = re.search("xyz", "xyzxyz") assert m diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -169,19 +169,6 @@ space.sys.get('getdefaultencoding') ) assert encoding == space.unwrap(w_default_encoding) - invalid = rffi.str2charp('invalid') - utf_8 = rffi.str2charp('utf-8') - prev_encoding = rffi.str2charp(space.unwrap(w_default_encoding)) - self.raises(space, api, TypeError, api.PyUnicode_SetDefaultEncoding, lltype.nullptr(rffi.CCHARP.TO)) - assert api.PyUnicode_SetDefaultEncoding(invalid) == -1 - assert api.PyErr_Occurred() is space.w_LookupError - api.PyErr_Clear() - assert api.PyUnicode_SetDefaultEncoding(utf_8) == 0 - assert rffi.charp2str(api.PyUnicode_GetDefaultEncoding()) == 'utf-8' - assert api.PyUnicode_SetDefaultEncoding(prev_encoding) == 0 - rffi.free_charp(invalid) - rffi.free_charp(utf_8) - rffi.free_charp(prev_encoding) def test_AS(self, space, api): word = space.wrap(u'spam') @@ -196,10 +183,10 @@ space.wrapbytes('spam')) utf_8 = rffi.str2charp('utf-8') - encoded = api.PyUnicode_AsEncodedString(space.wrap(u'späm'), + encoded = api.PyUnicode_AsEncodedString(space.wrap(u'späm'), utf_8, None) assert space.unwrap(encoded) == 'sp\xc3\xa4m' - encoded_obj = api.PyUnicode_AsEncodedObject(space.wrap(u'späm'), + encoded_obj = api.PyUnicode_AsEncodedObject(space.wrap(u'späm'), utf_8, None) assert space.eq_w(encoded, encoded_obj) self.raises(space, api, TypeError, api.PyUnicode_AsEncodedString, @@ -208,7 +195,7 @@ space.wrapbytes(''), None, None) ascii = rffi.str2charp('ascii') replace = rffi.str2charp('replace') - encoded = api.PyUnicode_AsEncodedString(space.wrap(u'späm'), + encoded = api.PyUnicode_AsEncodedString(space.wrap(u'späm'), ascii, replace) assert space.unwrap(encoded) == 'sp?m' rffi.free_charp(utf_8) @@ -249,14 +236,14 @@ ar[0] = rffi.cast(PyObject, 
py_uni) api.PyUnicode_Resize(ar, 3) py_uni = rffi.cast(PyUnicodeObject, ar[0]) - assert py_uni.c_size == 3 + assert py_uni.c_length == 3 assert py_uni.c_buffer[1] == u'b' assert py_uni.c_buffer[3] == u'\x00' # the same for growing ar[0] = rffi.cast(PyObject, py_uni) api.PyUnicode_Resize(ar, 10) py_uni = rffi.cast(PyUnicodeObject, ar[0]) - assert py_uni.c_size == 10 + assert py_uni.c_length == 10 assert py_uni.c_buffer[1] == 'b' assert py_uni.c_buffer[10] == '\x00' Py_DecRef(space, ar[0]) @@ -267,13 +254,13 @@ w_res = api.PyUnicode_AsUTF8String(w_u) assert space.type(w_res) is space.w_str assert space.unwrap(w_res) == 'sp\tm' - + def test_decode_utf8(self, space, api): u = rffi.str2charp(u'sp\x134m'.encode("utf-8")) w_u = api.PyUnicode_DecodeUTF8(u, 5, None) assert space.type(w_u) is space.w_unicode assert space.unwrap(w_u) == u'sp\x134m' - + w_u = api.PyUnicode_DecodeUTF8(u, 2, None) assert space.type(w_u) is space.w_unicode assert space.unwrap(w_u) == 'sp' @@ -489,7 +476,7 @@ ustr = "abcdef" w_ustr = space.wrap(ustr.decode("ascii")) result = api.PyUnicode_AsASCIIString(w_ustr) - + assert space.eq_w(space.wrapbytes(ustr), result) w_ustr = space.wrap(u"abcd\xe9f") From pypy.commits at gmail.com Thu Apr 28 10:59:37 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 28 Apr 2016 07:59:37 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: rm stray pypy2-ism Message-ID: <57222559.0976c20a.9748d.5b66@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r84010:28b76bd44159 Date: 2016-04-28 15:58 +0100 http://bitbucket.org/pypy/pypy/changeset/28b76bd44159/ Log: rm stray pypy2-ism diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -190,7 +190,6 @@ # if the typedef has a dict, then the rpython-class does all the dict # management, which means from the point of view of mapdict there is no # dict. 
- from pypy.module.__builtin__.interp_classobj import W_InstanceObject typedef = w_self.layout.typedef if (w_self.hasdict and not typedef.hasdict): w_self.terminator = DictTerminator(space, w_self) From pypy.commits at gmail.com Thu Apr 28 11:22:04 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 28 Apr 2016 08:22:04 -0700 (PDT) Subject: [pypy-commit] pypy share-cpyext-cpython-api: First try Message-ID: <57222a9c.d2711c0a.a3b99.fffffc5a@mx.google.com> Author: Armin Rigo Branch: share-cpyext-cpython-api Changeset: r84012:29ffb56dd3c7 Date: 2016-04-28 17:20 +0200 http://bitbucket.org/pypy/pypy/changeset/29ffb56dd3c7/ Log: First try diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -10,6 +10,7 @@ from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here +from rpython.rlib.objectmodel import dont_inline from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.gensupp import NameManager @@ -668,37 +669,129 @@ pypy_debug_catch_fatal_exception = rffi.llexternal('pypy_debug_catch_fatal_exception', [], lltype.Void) + +# ____________________________________________________________ + + +class WrapperCache(object): + def __init__(self, space): + self.space = space + self.wrapper_gens = {} # {signature: WrapperGen()} + self.callable2name = {} + self.stats = [0, 0] + +class WrapperGen(object): + def __init__(self, space, signature): + self.space = space + self.callable2name = {} + self.wrapper_second_level = make_wrapper_second_level( + self.space, self.callable2name, *signature) + + def make_wrapper(self, callable): + self.callable2name[callable] = callable.__name__ + wrapper_second_level = self.wrapper_second_level + + def wrapper(*args): + # no GC here, not even any GC object + args += (callable,) + 
return wrapper_second_level(*args) + + wrapper.__name__ = "wrapper for %r" % (callable, ) + return wrapper + + # Make the wrapper for the cases (1) and (2) def make_wrapper(space, callable, gil=None): "NOT_RPYTHON" + # This logic is obscure, because we try to avoid creating one + # big wrapper() function for every callable. Instead we create + # only one per "signature". + + argnames = callable.api_func.argnames + argtypesw = zip(callable.api_func.argtypes, + [_name.startswith("w_") for _name in argnames]) + error_value = callable.api_func.error_value + if (isinstance(callable.api_func.restype, lltype.Ptr) + and error_value is not CANNOT_FAIL): + assert lltype.typeOf(error_value) == callable.api_func.restype + assert not error_value # only support error=NULL + error_value = 0 # because NULL is not hashable + + signature = (tuple(argtypesw), + callable.api_func.restype, + callable.api_func.result_borrowed, + error_value, + gil) + + cache = space.fromcache(WrapperCache) + cache.stats[1] += 1 + try: + wrapper_gen = cache.wrapper_gens[signature] + except KeyError: + print signature + wrapper_gen = cache.wrapper_gens[signature] = WrapperGen(space, + signature) + cache.stats[0] += 1 + print 'Wrapper cache [wrappers/total]:', cache.stats + return wrapper_gen.make_wrapper(callable) + + + at dont_inline +def deadlock_error(funcname): + fatalerror_notb("GIL deadlock detected when a CPython C extension " + "module calls %r" % (funcname,)) + + at dont_inline +def no_gil_error(funcname): + fatalerror_notb("GIL not held when a CPython C extension " + "module calls %r" % (funcname,)) + + at dont_inline +def unexpected_exception(funcname, e, tb): + print 'Fatal error in cpyext, CPython compatibility layer, calling',funcname + print 'Either report a bug or consider not using this particular extension' + if not we_are_translated(): + if tb is None: + tb = sys.exc_info()[2] + import traceback + traceback.print_exc() + if sys.stdout == sys.__stdout__: + import pdb; pdb.post_mortem(tb) 
+ # we can't do much here, since we're in ctypes, swallow + else: + print str(e) + pypy_debug_catch_fatal_exception() + assert False + +def make_wrapper_second_level(space, callable2name, argtypesw, restype, + result_borrowed, error_value, gil): from rpython.rlib import rgil - names = callable.api_func.argnames - argtypes_enum_ui = unrolling_iterable(enumerate(zip(callable.api_func.argtypes, - [name.startswith("w_") for name in names]))) - fatal_value = callable.api_func.restype._defl() + argtypes_enum_ui = unrolling_iterable(enumerate(argtypesw)) + fatal_value = restype._defl() gil_acquire = (gil == "acquire" or gil == "around") gil_release = (gil == "release" or gil == "around") pygilstate_ensure = (gil == "pygilstate_ensure") pygilstate_release = (gil == "pygilstate_release") assert (gil is None or gil_acquire or gil_release or pygilstate_ensure or pygilstate_release) - deadlock_error = ("GIL deadlock detected when a CPython C extension " - "module calls %r" % (callable.__name__,)) - no_gil_error = ("GIL not held when a CPython C extension " - "module calls %r" % (callable.__name__,)) + expected_nb_args = len(argtypesw) + pygilstate_ensure - @specialize.ll() - def wrapper(*args): + if isinstance(restype, lltype.Ptr) and error_value == 0: + error_value = lltype.nullptr(restype.TO) + + def wrapper_second_level(*args): from pypy.module.cpyext.pyobject import make_ref, from_ref, is_pyobj from pypy.module.cpyext.pyobject import as_pyobj # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer + callable = args[-1] + args = args[:len(args)-1] # see "Handling of the GIL" above (careful, we don't have the GIL here) tid = rthread.get_or_make_ident() if gil_acquire: if cpyext_glob_tid_ptr[0] == tid: - fatalerror_notb(deadlock_error) + deadlock_error(callable2name[callable]) rgil.acquire() assert cpyext_glob_tid_ptr[0] == 0 elif pygilstate_ensure: @@ -711,7 +804,7 @@ args += (pystate.PyGILState_UNLOCKED,) else: if 
cpyext_glob_tid_ptr[0] != tid: - fatalerror_notb(no_gil_error) + no_gil_error(callable2name[callable]) cpyext_glob_tid_ptr[0] = 0 rffi.stackcounter.stacks_counter += 1 @@ -722,8 +815,7 @@ try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, - assert len(args) == (len(callable.api_func.argtypes) + - pygilstate_ensure) + assert len(args) == expected_nb_args for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if is_PyObject(typ) and is_wrapped: @@ -757,41 +849,28 @@ failed = False if failed: - error_value = callable.api_func.error_value if error_value is CANNOT_FAIL: raise SystemError("The function '%s' was not supposed to fail" % (callable.__name__,)) retval = error_value - elif is_PyObject(callable.api_func.restype): + elif is_PyObject(restype): if is_pyobj(result): retval = result else: if result is not None: - if callable.api_func.result_borrowed: + if result_borrowed: retval = as_pyobj(space, result) else: retval = make_ref(space, result) - retval = rffi.cast(callable.api_func.restype, retval) + retval = rffi.cast(restype, retval) else: retval = lltype.nullptr(PyObject.TO) - elif callable.api_func.restype is not lltype.Void: - retval = rffi.cast(callable.api_func.restype, result) + elif restype is not lltype.Void: + retval = rffi.cast(restype, result) except Exception, e: - print 'Fatal error in cpyext, CPython compatibility layer, calling', callable.__name__ - print 'Either report a bug or consider not using this particular extension' - if not we_are_translated(): - if tb is None: - tb = sys.exc_info()[2] - import traceback - traceback.print_exc() - if sys.stdout == sys.__stdout__: - import pdb; pdb.post_mortem(tb) - # we can't do much here, since we're in ctypes, swallow - else: - print str(e) - pypy_debug_catch_fatal_exception() - assert False + unexpected_exception(callable2name[callable], e, tb) + rffi.stackcounter.stacks_counter -= 1 # see "Handling of the GIL" above @@ -808,9 +887,9 @@ cpyext_glob_tid_ptr[0] = tid return 
retval - callable._always_inline_ = 'try' - wrapper.__name__ = "wrapper for %r" % (callable, ) - return wrapper + + wrapper_second_level._dont_inline_ = True + return wrapper_second_level def process_va_name(name): return name.replace('*', '_star') From pypy.commits at gmail.com Thu Apr 28 11:22:02 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 28 Apr 2016 08:22:02 -0700 (PDT) Subject: [pypy-commit] pypy share-cpyext-cpython-api: Share the wrapper logic among all functions with the same signature Message-ID: <57222a9a.a9a1c20a.2eb94.6778@mx.google.com> Author: Armin Rigo Branch: share-cpyext-cpython-api Changeset: r84011:b01ae28d79f9 Date: 2016-04-28 17:20 +0200 http://bitbucket.org/pypy/pypy/changeset/b01ae28d79f9/ Log: Share the wrapper logic among all functions with the same signature From pypy.commits at gmail.com Thu Apr 28 11:28:45 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 28 Apr 2016 08:28:45 -0700 (PDT) Subject: [pypy-commit] stmgc default: Point the default to another demo, because demo2 is failing right now Message-ID: <57222c2d.109a1c0a.e5588.1281@mx.google.com> Author: Armin Rigo Branch: Changeset: r1989:60abbf069bc8 Date: 2016-04-28 17:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/60abbf069bc8/ Log: Point the default to another demo, because demo2 is failing right now diff --git a/c8/demo/Makefile b/c8/demo/Makefile --- a/c8/demo/Makefile +++ b/c8/demo/Makefile @@ -2,9 +2,9 @@ # Makefile for the demos. 
# -DEBUG_EXE = debug-demo2 -BUILD_EXE = build-demo2 -RELEASE_EXE = release-demo2 +DEBUG_EXE = debug-demo_simple +BUILD_EXE = build-demo_simple +RELEASE_EXE = release-demo_simple debug: $(DEBUG_EXE) # with prints and asserts build: $(BUILD_EXE) # without prints, but with asserts From pypy.commits at gmail.com Thu Apr 28 11:44:08 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 28 Apr 2016 08:44:08 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: rm more stray pypy2-isms Message-ID: <57222fc8.cf8ec20a.1afa0.7632@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r84013:a5270742f467 Date: 2016-04-28 16:43 +0100 http://bitbucket.org/pypy/pypy/changeset/a5270742f467/ Log: rm more stray pypy2-isms diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1171,8 +1171,6 @@ return self.wrap(self.lookup(w_obj, "__call__") is not None) def issequence_w(self, w_obj): - if self.is_oldstyle_instance(w_obj): - return (self.findattr(w_obj, self.wrap('__getitem__')) is not None) flag = self.type(w_obj).flag_map_or_seq if flag == 'M': return False @@ -1182,8 +1180,6 @@ return (self.lookup(w_obj, '__getitem__') is not None) def ismapping_w(self, w_obj): - if self.is_oldstyle_instance(w_obj): - return (self.findattr(w_obj, self.wrap('__getitem__')) is not None) flag = self.type(w_obj).flag_map_or_seq if flag == 'M': return True diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -148,7 +148,7 @@ py_memberdescr.c_d_member = w_obj.member def memberdescr_realize(space, obj): - # XXX NOT TESTED When is this ever called? + # XXX NOT TESTED When is this ever called? 
member = rffi.cast(lltype.Ptr(PyMemberDef), obj) w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) w_obj = space.allocate_instance(W_MemberDescr, w_type) @@ -405,8 +405,7 @@ # if a sequence or a mapping, then set the flag to force it if pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_item: self.flag_map_or_seq = 'S' - elif (pto.c_tp_as_mapping and pto.c_tp_as_mapping.c_mp_subscript and - not (pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_slice)): + elif pto.c_tp_as_mapping and pto.c_tp_as_mapping.c_mp_subscript: self.flag_map_or_seq = 'M' if pto.c_tp_doc: self.w_doc = space.wrap(rffi.charp2str(pto.c_tp_doc)) @@ -627,7 +626,7 @@ if py_type.c_ob_type: w_metatype = from_ref(space, rffi.cast(PyObject, py_type.c_ob_type)) - else: + else: # Somehow the tp_base type is created with no ob_type, notably # PyString_Type and PyBaseString_Type # While this is a hack, cpython does it as well. @@ -642,10 +641,10 @@ # inheriting tp_as_* slots base = py_type.c_tp_base if base: - if not py_type.c_tp_as_number: py_type.c_tp_as_number = base.c_tp_as_number - if not py_type.c_tp_as_sequence: py_type.c_tp_as_sequence = base.c_tp_as_sequence - if not py_type.c_tp_as_mapping: py_type.c_tp_as_mapping = base.c_tp_as_mapping - if not py_type.c_tp_as_buffer: py_type.c_tp_as_buffer = base.c_tp_as_buffer + if not py_type.c_tp_as_number: py_type.c_tp_as_number = base.c_tp_as_number + if not py_type.c_tp_as_sequence: py_type.c_tp_as_sequence = base.c_tp_as_sequence + if not py_type.c_tp_as_mapping: py_type.c_tp_as_mapping = base.c_tp_as_mapping + if not py_type.c_tp_as_buffer: py_type.c_tp_as_buffer = base.c_tp_as_buffer return w_obj diff --git a/pypy/module/operator/interp_operator.py b/pypy/module/operator/interp_operator.py --- a/pypy/module/operator/interp_operator.py +++ b/pypy/module/operator/interp_operator.py @@ -1,6 +1,5 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec -from pypy.module.__builtin__.interp_classobj 
import W_InstanceObject def index(space, w_a): @@ -207,12 +206,3 @@ @unwrap_spec(default=int) def _length_hint(space, w_iterable, default): return space.wrap(space.length_hint(w_iterable, default)) - - -def isMappingType(space, w_obj): - 'isMappingType(a) -- Return True if a has a mapping type, False otherwise.' - return space.wrap(space.ismapping_w(w_obj)) - -def isSequenceType(space, w_obj): - 'isSequenceType(a) -- Return True if a has a sequence type, False otherwise.' - return space.wrap(space.issequence_w(w_obj)) From pypy.commits at gmail.com Thu Apr 28 12:17:51 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 28 Apr 2016 09:17:51 -0700 (PDT) Subject: [pypy-commit] pypy default: Do 'import os' less hackishly in test_cpyext.py Message-ID: <572237af.85661c0a.873b8.3185@mx.google.com> Author: Ronan Lamy Branch: Changeset: r84014:606c858627a0 Date: 2016-04-28 17:16 +0100 http://bitbucket.org/pypy/pypy/changeset/606c858627a0/ Log: Do 'import os' less hackishly in test_cpyext.py diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -254,13 +254,15 @@ class AppTestCpythonExtensionBase(LeakCheckingTest): def setup_class(cls): - cls.space.getbuiltinmodule("cpyext") - from pypy.module.imp.importing import importhook - importhook(cls.space, "os") # warm up reference counts + space = cls.space + space.getbuiltinmodule("cpyext") + # 'import os' to warm up reference counts + w_import = space.builtin.getdictvalue(space, '__import__') + space.call_function(w_import, space.wrap("os")) #state = cls.space.fromcache(RefcountState) ZZZ #state.non_heaptypes_w[:] = [] if not cls.runappdirect: - cls.w_runappdirect = cls.space.wrap(cls.runappdirect) + cls.w_runappdirect = space.wrap(cls.runappdirect) def setup_method(self, func): @gateway.unwrap_spec(name=str) From pypy.commits at gmail.com Thu Apr 28 12:40:05 2016 From: pypy.commits at gmail.com 
(rlamy) Date: Thu, 28 Apr 2016 09:40:05 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: hg merge default Message-ID: <57223ce5.4374c20a.52888.ffff91cc@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r84015:d9916a3b8cf3 Date: 2016-04-28 17:38 +0100 http://bitbucket.org/pypy/pypy/changeset/d9916a3b8cf3/ Log: hg merge default diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -1,3 +1,2 @@ -* python setup.py install in numpy does not somehow tell setuptools - it's installed (I bet it's about the py27 tag) * reduce size of generated c code from slot definitions in slotdefs. +* remove broken DEBUG_REFCOUNT from pyobject.py diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py --- a/lib-python/2.7/distutils/cmd.py +++ b/lib-python/2.7/distutils/cmd.py @@ -298,8 +298,16 @@ src_cmd_obj.ensure_finalized() for (src_option, dst_option) in option_pairs: if getattr(self, dst_option) is None: - setattr(self, dst_option, - getattr(src_cmd_obj, src_option)) + try: + setattr(self, dst_option, + getattr(src_cmd_obj, src_option)) + except AttributeError: + # This was added after problems with setuptools 18.4. + # It seems that setuptools 20.9 fixes the problem. + # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv + # if I say "virtualenv -p pypy venv-pypy" then it + # just installs setuptools 18.4 from some cache... 
+ pass def get_finalized_command(self, command, create=1): diff --git a/pypy/module/cpyext/cdatetime.py b/pypy/module/cpyext/cdatetime.py --- a/pypy/module/cpyext/cdatetime.py +++ b/pypy/module/cpyext/cdatetime.py @@ -15,6 +15,7 @@ ('DateTimeType', PyTypeObjectPtr), ('TimeType', PyTypeObjectPtr), ('DeltaType', PyTypeObjectPtr), + ('TZInfoType', PyTypeObjectPtr), )) @cpython_api([], lltype.Ptr(PyDateTime_CAPI)) @@ -40,6 +41,10 @@ datetimeAPI.c_DeltaType = rffi.cast( PyTypeObjectPtr, make_ref(space, w_type)) + w_type = space.getattr(w_datetime, space.wrap("tzinfo")) + datetimeAPI.c_TZInfoType = rffi.cast( + PyTypeObjectPtr, make_ref(space, w_type)) + return datetimeAPI PyDateTime_DateStruct = lltype.ForwardReference() @@ -87,6 +92,7 @@ make_check_function("PyDate_Check", "date") make_check_function("PyTime_Check", "time") make_check_function("PyDelta_Check", "timedelta") +make_check_function("PyTZInfo_Check", "tzinfo") # Constructors diff --git a/pypy/module/cpyext/include/datetime.h b/pypy/module/cpyext/include/datetime.h --- a/pypy/module/cpyext/include/datetime.h +++ b/pypy/module/cpyext/include/datetime.h @@ -11,6 +11,7 @@ PyTypeObject *DateTimeType; PyTypeObject *TimeType; PyTypeObject *DeltaType; + PyTypeObject *TZInfoType; } PyDateTime_CAPI; PyAPI_DATA(PyDateTime_CAPI*) PyDateTimeAPI; @@ -36,6 +37,10 @@ PyObject_HEAD } PyDateTime_DateTime; +typedef struct { + PyObject_HEAD +} PyDateTime_TZInfo; + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -256,11 +256,15 @@ class AppTestCpythonExtensionBase(LeakCheckingTest): def setup_class(cls): - cls.space.getbuiltinmodule("cpyext") + space = cls.space + space.getbuiltinmodule("cpyext") + # 'import os' to warm up reference counts + w_import = space.builtin.getdictvalue(space, '__import__') + space.call_function(w_import, space.wrap("os")) #state = 
cls.space.fromcache(RefcountState) ZZZ #state.non_heaptypes_w[:] = [] if not cls.runappdirect: - cls.w_runappdirect = cls.space.wrap(cls.runappdirect) + cls.w_runappdirect = space.wrap(cls.runappdirect) def setup_method(self, func): @gateway.unwrap_spec(name=str) diff --git a/pypy/module/cpyext/test/test_datetime.py b/pypy/module/cpyext/test/test_datetime.py --- a/pypy/module/cpyext/test/test_datetime.py +++ b/pypy/module/cpyext/test/test_datetime.py @@ -72,6 +72,16 @@ date = datetime.datetime.fromtimestamp(0) assert space.unwrap(space.str(w_date)) == str(date) + def test_tzinfo(self, space, api): + w_tzinfo = space.appexec( + [], """(): + from datetime import tzinfo + return tzinfo() + """) + assert api.PyTZInfo_Check(w_tzinfo) + assert api.PyTZInfo_CheckExact(w_tzinfo) + assert not api.PyTZInfo_Check(space.w_None) + class AppTestDatetime(AppTestCpythonExtensionBase): def test_CAPI(self): module = self.import_extension('foo', [ @@ -82,11 +92,12 @@ PyErr_SetString(PyExc_RuntimeError, "No PyDateTimeAPI"); return NULL; } - return PyTuple_Pack(4, + return PyTuple_Pack(5, PyDateTimeAPI->DateType, PyDateTimeAPI->DateTimeType, PyDateTimeAPI->TimeType, - PyDateTimeAPI->DeltaType); + PyDateTimeAPI->DeltaType, + PyDateTimeAPI->TZInfoType); """), ("clear_types", "METH_NOARGS", """ @@ -94,6 +105,7 @@ Py_DECREF(PyDateTimeAPI->DateTimeType); Py_DECREF(PyDateTimeAPI->TimeType); Py_DECREF(PyDateTimeAPI->DeltaType); + Py_DECREF(PyDateTimeAPI->TZInfoType); Py_RETURN_NONE; """ ) @@ -102,5 +114,6 @@ assert module.get_types() == (datetime.date, datetime.datetime, datetime.time, - datetime.timedelta) + datetime.timedelta, + datetime.tzinfo) module.clear_types() diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -5,7 +5,6 @@ equivalent to x+y. 
''' -import types def countOf(a,b): diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -32,7 +32,8 @@ a.z = 'Z' assert operator.attrgetter('x','z','y')(a) == ('X', 'Z', 'Y') - raises(TypeError, operator.attrgetter, ('x', (), 'y')) + e = raises(TypeError, operator.attrgetter, ('x', (), 'y')) + assert str(e.value) == "attribute name must be a string, not 'tuple'" data = list(map(str, range(20))) assert operator.itemgetter(2,10,5)(data) == ('2', '10', '5') From pypy.commits at gmail.com Thu Apr 28 17:07:06 2016 From: pypy.commits at gmail.com (devin.jeanpierre) Date: Thu, 28 Apr 2016 14:07:06 -0700 (PDT) Subject: [pypy-commit] pypy default: Compile with -Werror in cpyext tests. Message-ID: <57227b7a.2457c20a.4ec44.fffff356@mx.google.com> Author: Devin Jeanpierre Branch: Changeset: r84016:e648ab75ea8f Date: 2016-04-25 10:15 -0700 http://bitbucket.org/pypy/pypy/changeset/e648ab75ea8f/ Log: Compile with -Werror in cpyext tests. Callers may build with -Werror and should not succeed with CPython but fail with cpyext. This forces us to have the same API -- in particular, to use the same pointer types and perform the same casts. See, for example, the change to the API in ndarrayobject.py. diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -229,9 +229,7 @@ order=order, owning=owning, w_subtype=w_subtype) gufunctype = lltype.Ptr(ufuncs.GenericUfunc) -# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, is there -# a problem with casting function pointers? 
- at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, + at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, rffi.CCHARP], PyObject, header=HEADER) def PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, @@ -239,14 +237,18 @@ w_signature = rffi.charp2str(signature) return do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return, w_signature) - + def do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return, w_signature): funcs_w = [None] * ntypes dtypes_w = [None] * ntypes * (nin + nout) + # XXX For some reason funcs[i] segfaults, but this does not: + # cast(gufunctype, cast(CArrayPtr(CArrayPtr(gufunctype)), funcs)[i]) + # Something is very wrong here. + funcs_wrong_type = rffi.cast(rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), funcs) for i in range(ntypes): - funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]), data) + funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs_wrong_type[i]), data) for i in range(ntypes*(nin+nout)): dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] w_funcs = space.newlist(funcs_w) @@ -258,7 +260,7 @@ w_signature, w_identity, w_name, w_doc, stack_inputs=True) return ufunc_generic - at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, + at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t], PyObject, header=HEADER) def PyUFunc_FromFuncAndData(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return): diff --git a/pypy/module/cpyext/test/test_borrow.py b/pypy/module/cpyext/test/test_borrow.py --- a/pypy/module/cpyext/test/test_borrow.py +++ 
b/pypy/module/cpyext/test/test_borrow.py @@ -12,13 +12,13 @@ PyObject *t = PyTuple_New(1); PyObject *f = PyFloat_FromDouble(42.0); PyObject *g = NULL; - printf("Refcnt1: %i\\n", f->ob_refcnt); + printf("Refcnt1: %zd\\n", f->ob_refcnt); PyTuple_SetItem(t, 0, f); // steals reference - printf("Refcnt2: %i\\n", f->ob_refcnt); + printf("Refcnt2: %zd\\n", f->ob_refcnt); f = PyTuple_GetItem(t, 0); // borrows reference - printf("Refcnt3: %i\\n", f->ob_refcnt); + printf("Refcnt3: %zd\\n", f->ob_refcnt); g = PyTuple_GetItem(t, 0); // borrows reference again - printf("Refcnt4: %i\\n", f->ob_refcnt); + printf("Refcnt4: %zd\\n", f->ob_refcnt); printf("COMPARE: %i\\n", f == g); fflush(stdout); Py_DECREF(t); diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -109,7 +109,10 @@ module = self.import_extension('foo', [ ("string_None", "METH_VARARGS", ''' - return PyString_AsString(Py_None); + if (PyString_AsString(Py_None)) { + Py_RETURN_NONE; + } + return NULL; ''' )]) raises(TypeError, module.string_None) diff --git a/pypy/module/cpyext/test/test_classobject.py b/pypy/module/cpyext/test/test_classobject.py --- a/pypy/module/cpyext/test/test_classobject.py +++ b/pypy/module/cpyext/test/test_classobject.py @@ -29,7 +29,6 @@ assert space.unwrap(space.getattr(w_instance, space.wrap('x'))) == 1 assert space.unwrap(space.getattr(w_instance, space.wrap('y'))) == 2 assert space.unwrap(space.getattr(w_instance, space.wrap('args'))) == (3,) - def test_lookup(self, space, api): w_instance = space.appexec([], """(): @@ -68,7 +67,7 @@ ("get_classtype", "METH_NOARGS", """ Py_INCREF(&PyClass_Type); - return &PyClass_Type; + return (PyObject*)&PyClass_Type; """)]) class C: pass assert module.get_classtype() is type(C) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- 
a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -73,8 +73,7 @@ else: kwds["link_files"] = [str(api_library + '.so')] if sys.platform.startswith('linux'): - kwds["compile_extra"]=["-Werror=implicit-function-declaration", - "-g", "-O0"] + kwds["compile_extra"]=["-Werror", "-g", "-O0"] kwds["link_extra"]=["-g"] modname = modname.split('.')[-1] @@ -654,7 +653,7 @@ refcnt_after = true_obj->ob_refcnt; Py_DECREF(true_obj); Py_DECREF(true_obj); - fprintf(stderr, "REFCNT %i %i\\n", refcnt, refcnt_after); + fprintf(stderr, "REFCNT %zd %zd\\n", refcnt, refcnt_after); return PyBool_FromLong(refcnt_after == refcnt + 2); } static PyObject* foo_bar(PyObject* self, PyObject *args) @@ -670,7 +669,7 @@ return NULL; refcnt_after = true_obj->ob_refcnt; Py_DECREF(tup); - fprintf(stderr, "REFCNT2 %i %i %i\\n", refcnt, refcnt_after, + fprintf(stderr, "REFCNT2 %zd %zd %zd\\n", refcnt, refcnt_after, true_obj->ob_refcnt); return PyBool_FromLong(refcnt_after == refcnt + 1 && refcnt == true_obj->ob_refcnt); diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -171,7 +171,7 @@ int little_endian, is_signed; if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) return NULL; - return _PyLong_FromByteArray("\x9A\xBC", 2, + return _PyLong_FromByteArray((unsigned char*)"\x9A\xBC", 2, little_endian, is_signed); """), ]) @@ -187,7 +187,7 @@ int little_endian, is_signed; if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) return NULL; - return _PyLong_FromByteArray("\x9A\xBC\x41", 3, + return _PyLong_FromByteArray((unsigned char*)"\x9A\xBC\x41", 3, little_endian, is_signed); """), ]) diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -158,14 +158,14 @@ 
PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; - if (val->ob_type != PyExc_TypeError) + if ((PyObject*)Py_TYPE(val) != PyExc_TypeError) Py_RETURN_FALSE; /* Normalize again */ PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; - if (val->ob_type != PyExc_TypeError) + if ((PyObject*)Py_TYPE(val) != PyExc_TypeError) Py_RETURN_FALSE; PyErr_Restore(type, val, tb); diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -308,7 +308,7 @@ return NULL; Py_DECREF(a1); PyType_Modified(type); - value = PyObject_GetAttrString(type, "a"); + value = PyObject_GetAttrString((PyObject*)type, "a"); Py_DECREF(value); if (PyDict_SetItemString(type->tp_dict, "a", @@ -316,7 +316,7 @@ return NULL; Py_DECREF(a2); PyType_Modified(type); - value = PyObject_GetAttrString(type, "a"); + value = PyObject_GetAttrString((PyObject*)type, "a"); return value; ''' ) @@ -416,14 +416,14 @@ ("test_tp_getattro", "METH_VARARGS", ''' PyObject *obj = PyTuple_GET_ITEM(args, 0); - PyIntObject *value = PyTuple_GET_ITEM(args, 1); + PyIntObject *value = (PyIntObject*) PyTuple_GET_ITEM(args, 1); if (!obj->ob_type->tp_getattro) { PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); return NULL; } PyObject *name = PyString_FromString("attr1"); - PyIntObject *attr = obj->ob_type->tp_getattro(obj, name); + PyIntObject *attr = (PyIntObject*) obj->ob_type->tp_getattro(obj, name); if (attr->ob_ival != value->ob_ival) { PyErr_SetString(PyExc_ValueError, @@ -433,7 +433,7 @@ Py_DECREF(name); Py_DECREF(attr); name = PyString_FromString("attr2"); - attr = obj->ob_type->tp_getattro(obj, name); + attr = (PyIntObject*) obj->ob_type->tp_getattro(obj, name); if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); @@ -628,8 +628,9 @@ } IntLikeObject; static int - 
intlike_nb_nonzero(IntLikeObject *v) + intlike_nb_nonzero(PyObject *o) { + IntLikeObject *v = (IntLikeObject*)o; if (v->value == -42) { PyErr_SetNone(PyExc_ValueError); return -1; From pypy.commits at gmail.com Thu Apr 28 17:07:09 2016 From: pypy.commits at gmail.com (devin.jeanpierre) Date: Thu, 28 Apr 2016 14:07:09 -0700 (PDT) Subject: [pypy-commit] pypy default: merge Message-ID: <57227b7d.c110c20a.cbd6f.7cf9@mx.google.com> Author: Devin Jeanpierre Branch: Changeset: r84017:0f0ab7f5334c Date: 2016-04-28 13:54 -0700 http://bitbucket.org/pypy/pypy/changeset/0f0ab7f5334c/ Log: merge diff too long, truncating to 2000 out of 12553 lines diff --git a/TODO b/TODO new file mode 100644 --- /dev/null +++ b/TODO @@ -0,0 +1,2 @@ +* reduce size of generated c code from slot definitions in slotdefs. +* remove broken DEBUG_REFCOUNT from pyobject.py diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py --- a/lib-python/2.7/distutils/cmd.py +++ b/lib-python/2.7/distutils/cmd.py @@ -298,8 +298,16 @@ src_cmd_obj.ensure_finalized() for (src_option, dst_option) in option_pairs: if getattr(self, dst_option) is None: - setattr(self, dst_option, - getattr(src_cmd_obj, src_option)) + try: + setattr(self, dst_option, + getattr(src_cmd_obj, src_option)) + except AttributeError: + # This was added after problems with setuptools 18.4. + # It seems that setuptools 20.9 fixes the problem. + # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv + # if I say "virtualenv -p pypy venv-pypy" then it + # just installs setuptools 18.4 from some cache... + pass def get_finalized_command(self, command, create=1): diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -5,15 +5,23 @@ overly detailed -1. check out the branch vendor/stdlib +0. make sure your working dir is clean +1. 
check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k) + or create branch vendor/stdlib-3-* 2. upgrade the files there + 2a. remove lib-python/2.7/ or lib-python/3/ + 2b. copy the files from the cpython repo + 2c. hg add lib-python/2.7/ or lib-python/3/ + 2d. hg remove --after + 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'` + 2f. fix copies / renames manually by running `hg copy --after ` for each copied file 3. update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit -5. update to default/py3k +5. update to default / py3k 6. create a integration branch for the new stdlib (just hg branch stdlib-$version) -7. merge vendor/stdlib +7. merge vendor/stdlib or vendor/stdlib-3-* 8. commit 10. fix issues 11. commit --close-branch -12. merge to default +12. merge to default / py3k diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -51,6 +51,8 @@ # if log is not opened, open it now if not _S_log_open: openlog() + if isinstance(message, unicode): + message = str(message) lib.syslog(priority, "%s", message) @builtinify diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -204,15 +204,6 @@ BoolOption("withstrbuf", "use strings optimized for addition (ver 2)", default=False), - BoolOption("withprebuiltchar", - "use prebuilt single-character string objects", - default=False), - - BoolOption("sharesmallstr", - "always reuse the prebuilt string objects " - "(the empty string and potentially single-char strings)", - default=False), - BoolOption("withspecialisedtuple", "use specialised tuples", default=False), @@ -222,39 +213,14 @@ default=False, requires=[("objspace.honor__builtins__", False)]), - BoolOption("withmapdict", - "make instances really small but slow without the JIT", - default=False, - 
requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withtypeversion", True), - ]), - - BoolOption("withrangelist", - "enable special range list implementation that does not " - "actually create the full list until the resulting " - "list is mutated", - default=False), BoolOption("withliststrategies", "enable optimized ways to store lists of primitives ", default=True), - BoolOption("withtypeversion", - "version type objects when changing them", - cmdline=None, - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), - - BoolOption("withmethodcache", - "try to cache method lookups", - default=False, - requires=[("objspace.std.withtypeversion", True), - ("translation.rweakref", True)]), BoolOption("withmethodcachecounter", "try to cache methods and provide a counter in __pypy__. " "for testing purposes only.", - default=False, - requires=[("objspace.std.withmethodcache", True)]), + default=False), IntOption("methodcachesizeexp", " 2 ** methodcachesizeexp is the size of the of the method cache ", default=11), @@ -265,22 +231,10 @@ BoolOption("optimized_list_getitem", "special case the 'list[integer]' expressions", default=False), - BoolOption("getattributeshortcut", - "track types that override __getattribute__", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), BoolOption("newshortcut", "cache and shortcut calling __new__ from builtin types", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), + default=False), - BoolOption("withidentitydict", - "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), ]), ]) @@ -296,15 +250,10 @@ """ # all the good optimizations for PyPy should be listed here if level in ['2', 
'3', 'jit']: - config.objspace.std.suggest(withrangelist=True) - config.objspace.std.suggest(withmethodcache=True) - config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(intshortcut=True) config.objspace.std.suggest(optimized_list_getitem=True) - config.objspace.std.suggest(getattributeshortcut=True) #config.objspace.std.suggest(newshortcut=True) config.objspace.std.suggest(withspecialisedtuple=True) - config.objspace.std.suggest(withidentitydict=True) #if not IS_64_BITS: # config.objspace.std.suggest(withsmalllong=True) @@ -317,16 +266,13 @@ # memory-saving optimizations if level == 'mem': config.objspace.std.suggest(withprebuiltint=True) - config.objspace.std.suggest(withrangelist=True) - config.objspace.std.suggest(withprebuiltchar=True) - config.objspace.std.suggest(withmapdict=True) + config.objspace.std.suggest(withliststrategies=True) if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) - config.objspace.std.suggest(withmapdict=True) def enable_allworkingmodules(config): diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -11,12 +11,6 @@ assert conf.objspace.usemodules.gc - conf.objspace.std.withmapdict = True - assert conf.objspace.std.withtypeversion - conf = get_pypy_config() - conf.objspace.std.withtypeversion = False - py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") - def test_conflicting_gcrootfinder(): conf = get_pypy_config() conf.translation.gc = "boehm" @@ -47,18 +41,10 @@ def test_set_pypy_opt_level(): conf = get_pypy_config() set_pypy_opt_level(conf, '2') - assert conf.objspace.std.getattributeshortcut + assert conf.objspace.std.intshortcut conf = get_pypy_config() set_pypy_opt_level(conf, '0') - assert not conf.objspace.std.getattributeshortcut - -def 
test_rweakref_required(): - conf = get_pypy_config() - conf.translation.rweakref = False - set_pypy_opt_level(conf, '3') - - assert not conf.objspace.std.withtypeversion - assert not conf.objspace.std.withmethodcache + assert not conf.objspace.std.intshortcut def test_check_documentation(): def check_file_exists(fn): diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -108,9 +108,9 @@ On Fedora:: - yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ - lib-sqlite3-devel ncurses-devel expat-devel openssl-devel - (XXX plus the Febora version of libgdbm-dev and tk-dev) + dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ + lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \ + gdbm-devel For the optional lzma module on PyPy3 you will also need ``xz-devel``. diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.getattributeshortcut.txt +++ /dev/null @@ -1,1 +0,0 @@ -Performance only: track types that override __getattribute__. diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt --- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt +++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt @@ -1,1 +1,1 @@ -Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`. +Set the cache size (number of entries) for the method cache. 
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withidentitydict.txt +++ /dev/null @@ -1,21 +0,0 @@ -============================= -objspace.std.withidentitydict -============================= - -* **name:** withidentitydict - -* **description:** enable a dictionary strategy for "by identity" comparisons - -* **command-line:** --objspace-std-withidentitydict - -* **command-line for negation:** --no-objspace-std-withidentitydict - -* **option type:** boolean option - -* **default:** True - - -Enable a dictionary strategy specialized for instances of classes which -compares "by identity", which is the default unless you override ``__hash__``, -``__eq__`` or ``__cmp__``. This strategy will be used only with new-style -classes. diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmapdict.txt +++ /dev/null @@ -1,5 +0,0 @@ -Enable the new version of "sharing dictionaries". - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmethodcache.txt +++ /dev/null @@ -1,2 +0,0 @@ -Enable method caching. See the section "Method Caching" in `Standard -Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__. 
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt --- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt +++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt @@ -1,1 +1,1 @@ -Testing/debug option for :config:`objspace.std.withmethodcache`. +Testing/debug option for the method cache. diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt deleted file mode 100644 diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withrangelist.txt +++ /dev/null @@ -1,11 +0,0 @@ -Enable "range list" objects. They are an additional implementation of the Python -``list`` type, indistinguishable for the normal user. Whenever the ``range`` -builtin is called, an range list is returned. As long as this list is not -mutated (and for example only iterated over), it uses only enough memory to -store the start, stop and step of the range. This makes using ``range`` as -efficient as ``xrange``, as long as the result is only used in a ``for``-loop. - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists - diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withtypeversion.txt +++ /dev/null @@ -1,6 +0,0 @@ -This (mostly internal) option enables "type versions": Every type object gets an -(only internally visible) version that is updated when the type's dict is -changed. This is e.g. used for invalidating caches. It does not make sense to -enable this option alone. - -.. 
internal diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -12,9 +12,9 @@ The work on the cling backend has so far been done only for CPython, but bringing it to PyPy is a lot less work than developing it in the first place. -.. _Reflex: http://root.cern.ch/drupal/content/reflex -.. _CINT: http://root.cern.ch/drupal/content/cint -.. _cling: http://root.cern.ch/drupal/content/cling +.. _Reflex: https://root.cern.ch/how/how-use-reflex +.. _CINT: https://root.cern.ch/introduction-cint +.. _cling: https://root.cern.ch/cling .. _llvm: http://llvm.org/ .. _clang: http://clang.llvm.org/ @@ -283,7 +283,8 @@ core reflection set, but for the moment assume we want to have it in the reflection library that we are building for this example. -The ``genreflex`` script can be steered using a so-called `selection file`_, +The ``genreflex`` script can be steered using a so-called `selection file`_ +(see "Generating Reflex Dictionaries") which is a simple XML file specifying, either explicitly or by using a pattern, which classes, variables, namespaces, etc. to select from the given header file. @@ -305,7 +306,7 @@ -.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries +.. _selection file: https://root.cern.ch/how/how-use-reflex Now the reflection info can be generated and compiled:: @@ -811,7 +812,7 @@ immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment variable. -.. _PyROOT: http://root.cern.ch/drupal/content/pyroot +.. _PyROOT: https://root.cern.ch/pyroot There are a couple of minor differences between PyCintex and cppyy, most to do with naming. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -387,6 +387,14 @@ wrappers. On PyPy we can't tell the difference, so ``ismethod([].__add__) == ismethod(list.__add__) == True``. 
+* in CPython, the built-in types have attributes that can be + implemented in various ways. Depending on the way, if you try to + write to (or delete) a read-only (or undeletable) attribute, you get + either a ``TypeError`` or an ``AttributeError``. PyPy tries to + strike some middle ground between full consistency and full + compatibility here. This means that a few corner cases don't raise + the same exception, like ``del (lambda:None).__closure__``. + * in pure Python, if you write ``class A(object): def f(self): pass`` and have a subclass ``B`` which doesn't override ``f()``, then ``B.f(x)`` still checks that ``x`` is an instance of ``B``. In diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -21,7 +21,7 @@ :source:`pypy/doc/discussion/` drafts of ideas and documentation -:source:`pypy/goal/` our :ref:`main PyPy-translation scripts ` +:source:`pypy/goal/` our main PyPy-translation scripts live here :source:`pypy/interpreter/` :doc:`bytecode interpreter ` and related objects diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -13,3 +13,4 @@ discussion/improve-rpython discussion/ctypes-implementation discussion/jit-profiler + discussion/rawrefcount diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -79,7 +79,7 @@ :doc:`Full details ` are `available here `. .. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 -.. _Reflex: http://root.cern.ch/drupal/content/reflex +.. _Reflex: https://root.cern.ch/how/how-use-reflex RPython Mixed Modules diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -106,8 +106,12 @@ For information on which third party extensions work (or do not work) with PyPy see the `compatibility wiki`_. 
+For more information about how we manage refcounting semamtics see +rawrefcount_ + .. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home .. _cffi: http://cffi.readthedocs.org/ +.. _rawrefcount: discussion/rawrefcount.html On which platforms does PyPy run? diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -62,29 +62,37 @@ Dictionary Optimizations ~~~~~~~~~~~~~~~~~~~~~~~~ -Multi-Dicts -+++++++++++ +Dict Strategies +++++++++++++++++ -Multi-dicts are a special implementation of dictionaries. It became clear that -it is very useful to *change* the internal representation of an object during -its lifetime. Multi-dicts are a general way to do that for dictionaries: they -provide generic support for the switching of internal representations for -dicts. +Dict strategies are an implementation approach for dictionaries (and lists) +that make it possible to use a specialized representation of the dictionary's +data, while still being able to switch back to a general representation should +that become necessary later. -If you just enable multi-dicts, special representations for empty dictionaries, -for string-keyed dictionaries. In addition there are more specialized dictionary -implementations for various purposes (see below). +Dict strategies are always enabled, by default there are special strategies for +dicts with just string keys, just unicode keys and just integer keys. If one of +those specialized strategies is used, then dict lookup can use much faster +hashing and comparison for the dict keys. There is of course also a strategy +for general keys. -This is now the default implementation of dictionaries in the Python interpreter. 
+Identity Dicts ++++++++++++++++ -Sharing Dicts +We also have a strategy specialized for keys that are instances of classes +which compares "by identity", which is the default unless you override +``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with +new-style classes. + + +Map Dicts +++++++++++++ -Sharing dictionaries are a special representation used together with multidicts. -This dict representation is used only for instance dictionaries and tries to -make instance dictionaries use less memory (in fact, in the ideal case the -memory behaviour should be mostly like that of using __slots__). +Map dictionaries are a special representation used together with dict strategies. +This dict strategy is used only for instance dictionaries and tries to +make instance dictionaries use less memory (in fact, usually memory behaviour +should be mostly like that of using ``__slots__``). The idea is the following: Most instances of the same class have very similar attributes, and are even adding these keys to the dictionary in the same order @@ -95,8 +103,6 @@ dicts: the representation of the instance dict contains only a list of values. -A more advanced version of sharing dicts, called *map dicts,* is available -with the :config:`objspace.std.withmapdict` option. List Optimizations @@ -114,8 +120,8 @@ created. This gives the memory and speed behaviour of ``xrange`` and the generality of use of ``range``, and makes ``xrange`` essentially useless. -You can enable this feature with the :config:`objspace.std.withrangelist` -option. +This feature is enabled by default as part of the +:config:`objspace.std.withliststrategies` option. User Class Optimizations @@ -133,8 +139,7 @@ base classes is changed). On subsequent lookups the cached version can be used, as long as the instance did not shadow any of its classes attributes. -You can enable this feature with the :config:`objspace.std.withmethodcache` -option. +This feature is enabled by default. 
Interpreter Optimizations diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -6,3 +6,40 @@ .. startrev: aa60332382a1 .. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046 + +.. branch: gcheader-decl + +Reduce the size of generated C sources. + + +.. branch: remove-objspace-options + +Remove a number of options from the build process that were never tested and +never set. Fix a performance bug in the method cache. + +.. branch: bitstring + +JIT: use bitstrings to compress the lists of read or written descrs +that we attach to EffectInfo. Fixes a problem we had in +remove-objspace-options. + +.. branch: cpyext-for-merge +Update cpyext C-API support: + - allow c-snippet tests to be run with -A so we can verify we are compatible + - fix many edge cases exposed by fixing tests to run with -A + - issequence() logic matches cpython + - make PyStringObject and PyUnicodeObject field names compatible with cpython + - add prelminary support for PyDateTime_* + - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy, + PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile, + - PyAnySet_CheckExact, PyUnicode_Concat + - improve support for PyGILState_Ensure, PyGILState_Release, and thread + primitives, also find a case where CPython will allow thread creation + before PyEval_InitThreads is run, dissallow on PyPy + - create a PyObject-specific list strategy + - rewrite slot assignment for typeobjects + - improve tracking of PyObject to rpython object mapping + - support tp_as_{number, sequence, mapping, buffer} slots +After this branch, we are almost able to support upstream numpy via cpyext, so +we created (yet another) fork of numpy at github.com/pypy/numpy with the needed +changes diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1176,7 
+1176,27 @@ return self.w_False def issequence_w(self, w_obj): - return (self.findattr(w_obj, self.wrap("__getitem__")) is not None) + if self.is_oldstyle_instance(w_obj): + return (self.findattr(w_obj, self.wrap('__getitem__')) is not None) + flag = self.type(w_obj).flag_map_or_seq + if flag == 'M': + return False + elif flag == 'S': + return True + else: + return (self.lookup(w_obj, '__getitem__') is not None) + + def ismapping_w(self, w_obj): + if self.is_oldstyle_instance(w_obj): + return (self.findattr(w_obj, self.wrap('__getitem__')) is not None) + flag = self.type(w_obj).flag_map_or_seq + if flag == 'M': + return True + elif flag == 'S': + return False + else: + return (self.lookup(w_obj, '__getitem__') is not None and + self.lookup(w_obj, '__getslice__') is None) # The code below only works # for the simple case (new-style instance). diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -214,6 +214,7 @@ self._trace(frame, 'exception', None, operationerr) #operationerr.print_detailed_traceback(self.space) + @jit.dont_look_inside @specialize.arg(1) def sys_exc_info(self, for_hidden=False): """Implements sys.exc_info(). @@ -225,15 +226,7 @@ # NOTE: the result is not the wrapped sys.exc_info() !!! 
""" - frame = self.gettopframe() - while frame: - if frame.last_exception is not None: - if ((for_hidden or not frame.hide()) or - frame.last_exception is - get_cleared_operation_error(self.space)): - return frame.last_exception - frame = frame.f_backref() - return None + return self.gettopframe()._exc_info_unroll(self.space, for_hidden) def set_sys_exc_info(self, operror): frame = self.gettopframe_nohidden() diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -114,6 +114,7 @@ e.write_unraisable(self.space, "new_code_hook()") def _initialize(self): + from pypy.objspace.std.mapdict import init_mapdict_cache if self.co_cellvars: argcount = self.co_argcount assert argcount >= 0 # annotator hint @@ -149,9 +150,7 @@ self._compute_flatcall() - if self.space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import init_mapdict_cache - init_mapdict_cache(self) + init_mapdict_cache(self) def _init_ready(self): "This is a hook for the vmprof module, which overrides this method." @@ -163,7 +162,10 @@ # When translating PyPy, freeze the file name # /lastdirname/basename.py # instead of freezing the complete translation-time path. 
- filename = self.co_filename.lstrip('<').rstrip('>') + filename = self.co_filename + if filename.startswith(''): + return + filename = filename.lstrip('<').rstrip('>') if filename.lower().endswith('.pyc'): filename = filename[:-1] basename = os.path.basename(filename) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized, check_nonneg from rpython.rlib.jit import hint -from rpython.rlib.objectmodel import we_are_translated, instantiate +from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint from rpython.tool.pairtype import extendabletype @@ -12,7 +12,8 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.astcompiler import consts from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import ( + OperationError, get_cleared_operation_error, oefmt) from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.nestedscope import Cell from pypy.tool import stdlib_opcode @@ -870,6 +871,22 @@ return space.wrap(self.builtin is not space.builtin) return space.w_False + @jit.unroll_safe + @specialize.arg(2) + def _exc_info_unroll(self, space, for_hidden=False): + """Return the most recent OperationError being handled in the + call stack + """ + frame = self + while frame: + last = frame.last_exception + if last is not None: + if last is get_cleared_operation_error(self.space): + break + if for_hidden or not frame.hide(): + return last + frame = frame.f_backref() + return None # ____________________________________________________________ diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -739,25 
+739,16 @@ unroller = SContinueLoop(startofloop) return self.unrollstack_and_jump(unroller) - @jit.unroll_safe def RAISE_VARARGS(self, nbargs, next_instr): space = self.space if nbargs == 0: - frame = self - while frame: - if frame.last_exception is not None: - operror = frame.last_exception - break - frame = frame.f_backref() - else: - raise OperationError(space.w_TypeError, - space.wrap("raise: no active exception to re-raise")) - if operror.w_type is space.w_None: - raise OperationError(space.w_TypeError, - space.wrap("raise: the exception to re-raise was cleared")) + last_operr = self._exc_info_unroll(space, for_hidden=True) + if last_operr is None: + raise oefmt(space.w_TypeError, + "No active exception to reraise") # re-raise, no new traceback obj will be attached - self.last_exception = operror - raise RaiseWithExplicitTraceback(operror) + self.last_exception = last_operr + raise RaiseWithExplicitTraceback(last_operr) w_value = w_traceback = space.w_None if nbargs >= 3: @@ -951,8 +942,7 @@ def LOAD_ATTR(self, nameindex, next_instr): "obj.attributename" w_obj = self.popvalue() - if (self.space.config.objspace.std.withmapdict - and not jit.we_are_jitted()): + if not jit.we_are_jitted(): from pypy.objspace.std.mapdict import LOAD_ATTR_caching w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex) else: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -98,175 +98,51 @@ # reason is that it is missing a place to store the __dict__, the slots, # the weakref lifeline, and it typically has no interp-level __del__. # So we create a few interp-level subclasses of W_XxxObject, which add -# some combination of features. 
-# -# We don't build 2**4 == 16 subclasses for all combinations of requested -# features, but limit ourselves to 6, chosen a bit arbitrarily based on -# typical usage (case 1 is the most common kind of app-level subclasses; -# case 2 is the memory-saving kind defined with __slots__). -# -# +----------------------------------------------------------------+ -# | NOTE: if withmapdict is enabled, the following doesn't apply! | -# | Map dicts can flexibly allow any slots/__dict__/__weakref__ to | -# | show up only when needed. In particular there is no way with | -# | mapdict to prevent some objects from being weakrefable. | -# +----------------------------------------------------------------+ -# -# dict slots del weakrefable -# -# 1. Y N N Y UserDictWeakref -# 2. N Y N N UserSlots -# 3. Y Y N Y UserDictWeakrefSlots -# 4. N Y N Y UserSlotsWeakref -# 5. Y Y Y Y UserDictWeakrefSlotsDel -# 6. N Y Y Y UserSlotsWeakrefDel -# -# Note that if the app-level explicitly requests no dict, we should not -# provide one, otherwise storing random attributes on the app-level -# instance would unexpectedly work. We don't care too much, though, if -# an object is weakrefable when it shouldn't really be. It's important -# that it has a __del__ only if absolutely needed, as this kills the -# performance of the GCs. -# -# Interp-level inheritance is like this: -# -# W_XxxObject base -# / \ -# 1 2 -# / \ -# 3 4 -# / \ -# 5 6 +# some combination of features. This is done using mapdict. -def get_unique_interplevel_subclass(config, cls, hasdict, wants_slots, - needsdel=False, weakrefable=False): +# we need two subclasses of the app-level type, one to add mapdict, and then one +# to add del to not slow down the GC. 
+ +def get_unique_interplevel_subclass(config, cls, needsdel=False): "NOT_RPYTHON: initialization-time only" if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False): needsdel = False assert cls.typedef.acceptable_as_base_class - key = config, cls, hasdict, wants_slots, needsdel, weakrefable + key = config, cls, needsdel try: return _subclass_cache[key] except KeyError: - subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel, - weakrefable) + # XXX can save a class if cls already has a __del__ + if needsdel: + cls = get_unique_interplevel_subclass(config, cls, False) + subcls = _getusercls(config, cls, needsdel) assert key not in _subclass_cache _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable): +def _getusercls(config, cls, wants_del, reallywantdict=False): + from rpython.rlib import objectmodel + from pypy.objspace.std.mapdict import (BaseUserClassMapdict, + MapdictDictSupport, MapdictWeakrefSupport, + _make_storage_mixin_size_n) typedef = cls.typedef - if wants_dict and typedef.hasdict: - wants_dict = False - if config.objspace.std.withmapdict and not typedef.hasdict: - # mapdict only works if the type does not already have a dict - if wants_del: - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - return _usersubclswithfeature(config, cls, "user", "dict", "weakref", "slots") - # Forest of if's - see the comment above. 
+ name = cls.__name__ + "User" + + mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()] + if reallywantdict or not typedef.hasdict: + # the type has no dict, mapdict to provide the dict + mixins_needed.append(MapdictDictSupport) + name += "Dict" + if not typedef.weakrefable: + # the type does not support weakrefs yet, mapdict to provide weakref + # support + mixins_needed.append(MapdictWeakrefSupport) + name += "Weakrefable" if wants_del: - if wants_dict: - # case 5. Parent class is 3. - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - else: - # case 6. Parent class is 4. - parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - elif wants_dict: - if wants_slots: - # case 3. Parent class is 1. - parentcls = get_unique_interplevel_subclass(config, cls, True, False, - False, True) - return _usersubclswithfeature(config, parentcls, "slots") - else: - # case 1 (we need to add weakrefable unless it's already in 'cls') - if not typedef.weakrefable: - return _usersubclswithfeature(config, cls, "user", "dict", "weakref") - else: - return _usersubclswithfeature(config, cls, "user", "dict") - else: - if weakrefable and not typedef.weakrefable: - # case 4. Parent class is 2. 
- parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, False) - return _usersubclswithfeature(config, parentcls, "weakref") - else: - # case 2 (if the base is already weakrefable, case 2 == case 4) - return _usersubclswithfeature(config, cls, "user", "slots") - -def _usersubclswithfeature(config, parentcls, *features): - key = config, parentcls, features - try: - return _usersubclswithfeature_cache[key] - except KeyError: - subcls = _builduserclswithfeature(config, parentcls, *features) - _usersubclswithfeature_cache[key] = subcls - return subcls -_usersubclswithfeature_cache = {} -_allusersubcls_cache = {} - -def _builduserclswithfeature(config, supercls, *features): - "NOT_RPYTHON: initialization-time only" - name = supercls.__name__ - name += ''.join([name.capitalize() for name in features]) - body = {} - #print '..........', name, '(', supercls.__name__, ')' - - def add(Proto): - for key, value in Proto.__dict__.items(): - if (not key.startswith('__') and not key.startswith('_mixin_') - or key == '__del__'): - if hasattr(value, "func_name"): - value = func_with_new_name(value, value.func_name) - body[key] = value - - if (config.objspace.std.withmapdict and "dict" in features): - from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin - add(BaseMapdictObject) - add(ObjectMixin) - body["user_overridden_class"] = True - features = () - - if "user" in features: # generic feature needed by all subcls - - class Proto(object): - user_overridden_class = True - - def getclass(self, space): - return promote(self.w__class__) - - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - - def user_setup(self, space, w_subtype): - self.space = space - self.w__class__ = w_subtype - self.user_setup_slots(w_subtype.layout.nslots) - - def user_setup_slots(self, nslots): - assert nslots == 0 - add(Proto) - - if "weakref" in features: - class Proto(object): - _lifeline_ = None - def 
getweakref(self): - return self._lifeline_ - def setweakref(self, space, weakreflifeline): - self._lifeline_ = weakreflifeline - def delweakref(self): - self._lifeline_ = None - add(Proto) - - if "del" in features: - parent_destructor = getattr(supercls, '__del__', None) + name += "Del" + parent_destructor = getattr(cls, '__del__', None) def call_parent_del(self): assert isinstance(self, subcls) parent_destructor(self) @@ -281,57 +157,16 @@ if parent_destructor is not None: self.enqueue_for_destruction(self.space, call_parent_del, 'internal destructor of ') - add(Proto) + mixins_needed.append(Proto) - if "slots" in features: - class Proto(object): - slots_w = [] - def user_setup_slots(self, nslots): - if nslots > 0: - self.slots_w = [None] * nslots - def setslotvalue(self, index, w_value): - self.slots_w[index] = w_value - def delslotvalue(self, index): - if self.slots_w[index] is None: - return False - self.slots_w[index] = None - return True - def getslotvalue(self, index): - return self.slots_w[index] - add(Proto) - - if "dict" in features: - base_user_setup = supercls.user_setup.im_func - if "user_setup" in body: - base_user_setup = body["user_setup"] - class Proto(object): - def getdict(self, space): - return self.w__dict__ - - def setdict(self, space, w_dict): - self.w__dict__ = check_new_dictionary(space, w_dict) - - def user_setup(self, space, w_subtype): - self.w__dict__ = space.newdict( - instance=True) - base_user_setup(self, space, w_subtype) - - add(Proto) - - subcls = type(name, (supercls,), body) - _allusersubcls_cache[subcls] = True + class subcls(cls): + user_overridden_class = True + for base in mixins_needed: + objectmodel.import_from_mixin(base) + del subcls.base + subcls.__name__ = name return subcls -# a couple of helpers for the Proto classes above, factored out to reduce -# the translated code size -def check_new_dictionary(space, w_dict): - if not space.isinstance_w(w_dict, space.w_dict): - raise OperationError(space.w_TypeError, - 
space.wrap("setting dictionary to a non-dict")) - from pypy.objspace.std import dictmultiobject - assert isinstance(w_dict, dictmultiobject.W_DictMultiObject) - return w_dict -check_new_dictionary._dont_inline_ = True # ____________________________________________________________ diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -87,7 +87,7 @@ howmany = get_len_of_range(space, start, stop, step) - if space.config.objspace.std.withrangelist: + if space.config.objspace.std.withliststrategies: return range_withspecialized_implementation(space, start, step, howmany) res_w = [None] * howmany @@ -99,7 +99,7 @@ def range_withspecialized_implementation(space, start, step, length): - assert space.config.objspace.std.withrangelist + assert space.config.objspace.std.withliststrategies from pypy.objspace.std.listobject import make_range_list return make_range_list(space, start, step, length) diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -185,12 +185,19 @@ class Cache: def __init__(self, space): - from pypy.interpreter.typedef import _usersubclswithfeature - # evil - self.cls_without_del = _usersubclswithfeature( - space.config, W_InstanceObject, "dict", "weakref") - self.cls_with_del = _usersubclswithfeature( - space.config, self.cls_without_del, "del") + from pypy.interpreter.typedef import _getusercls + + if hasattr(space, 'is_fake_objspace'): + # hack: with the fake objspace, we don't want to see typedef's + # _getusercls() at all + self.cls_without_del = W_InstanceObject + self.cls_with_del = W_InstanceObject + return + + self.cls_without_del = _getusercls( + space.config, W_InstanceObject, False, reallywantdict=True) + self.cls_with_del = _getusercls( + space.config, W_InstanceObject, 
True, reallywantdict=True) def class_descr_call(space, w_self, __args__): diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -748,10 +748,6 @@ raises(TypeError, delattr, A(), 42) -class AppTestGetattrWithGetAttributeShortcut(AppTestGetattr): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - class TestInternal: def test_execfile(self, space): fn = str(udir.join('test_execfile')) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1118,8 +1118,7 @@ assert getattr(c, u"x") == 1 -class AppTestOldStyleMapDict(AppTestOldstyle): - spaceconfig = {"objspace.std.withmapdict": True} +class AppTestOldStyleMapDict: def setup_class(cls): if cls.runappdirect: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -110,9 +110,8 @@ 'interp_magic.method_cache_counter') self.extra_interpdef('reset_method_cache_counter', 'interp_magic.reset_method_cache_counter') - if self.space.config.objspace.std.withmapdict: - self.extra_interpdef('mapdict_cache_counter', - 'interp_magic.mapdict_cache_counter') + self.extra_interpdef('mapdict_cache_counter', + 'interp_magic.mapdict_cache_counter') PYC_MAGIC = get_pyc_magic(self.space) self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC) try: diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -37,17 +37,15 @@ cache = space.fromcache(MethodCache) cache.misses = {} cache.hits = {} - if space.config.objspace.std.withmapdict: - cache = space.fromcache(MapAttrCache) - cache.misses = 
{} - cache.hits = {} + cache = space.fromcache(MapAttrCache) + cache.misses = {} + cache.hits = {} @unwrap_spec(name=str) def mapdict_cache_counter(space, name): """Return a tuple (index_cache_hits, index_cache_misses) for lookups in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter - assert space.config.objspace.std.withmapdict cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -1,8 +1,7 @@ import py class AppTest(object): - spaceconfig = {"objspace.usemodules.select": False, - "objspace.std.withrangelist": True} + spaceconfig = {"objspace.usemodules.select": False} def setup_class(cls): if cls.runappdirect: @@ -61,6 +60,7 @@ import __pypy__ import sys + result = [False] @__pypy__.hidden_applevel def test_hidden_with_tb(): def not_hidden(): 1/0 @@ -69,9 +69,11 @@ assert sys.exc_info() == (None, None, None) tb = __pypy__.get_hidden_tb() assert tb.tb_frame.f_code.co_name == 'not_hidden' - return True + result[0] = True + raise else: return False - assert test_hidden_with_tb() + raises(ZeroDivisionError, test_hidden_with_tb) + assert result[0] def test_lookup_special(self): from __pypy__ import lookup_special diff --git a/pypy/module/_cffi_backend/wrapper.py b/pypy/module/_cffi_backend/wrapper.py --- a/pypy/module/_cffi_backend/wrapper.py +++ b/pypy/module/_cffi_backend/wrapper.py @@ -92,7 +92,8 @@ return ctype._call(self.fnptr, args_w) def descr_repr(self, space): - return space.wrap("" % (self.fnname,)) + doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname) + return space.wrap("" % (doc,)) def descr_get_doc(self, space): doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname) diff --git a/pypy/module/cpyext/api.py 
b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,6 +37,8 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib import rawrefcount +from rpython.rlib import rthread +from rpython.rlib.debug import fatalerror_notb DEBUG_WRAPPER = True @@ -85,11 +87,13 @@ FILEP = rffi.COpaquePtr('FILE') if sys.platform == 'win32': - fileno = rffi.llexternal('_fileno', [FILEP], rffi.INT) + dash = '_' else: - fileno = rffi.llexternal('fileno', [FILEP], rffi.INT) - + dash = '' +fileno = rffi.llexternal(dash + 'fileno', [FILEP], rffi.INT) fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP) +fdopen = rffi.llexternal(dash + 'fdopen', [rffi.INT, CONST_STRING], + FILEP, save_err=rffi.RFFI_SAVE_ERRNO) _fclose = rffi.llexternal('fclose', [FILEP], rffi.INT) def fclose(fp): @@ -119,16 +123,18 @@ def is_valid_fp(fp): return is_valid_fd(fileno(fp)) +pypy_decl = 'pypy_decl.h' + constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER -METH_COEXIST METH_STATIC METH_CLASS +METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) -udir.join('pypy_decl.h').write("/* Will be filled later */\n") +udir.join(pypy_decl).write("/* Will be filled later */\n") udir.join('pypy_structmember_decl.h').write("/* Will be filled later */\n") udir.join('pypy_macros.h').write("/* Will be filled later */\n") globals().update(rffi_platform.configure(CConfig_constants)) @@ -144,7 +150,7 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir): +def copy_header_files(dstdir, copy_numpy_headers): # XXX: 20 lines of code to 
recursively copy a directory, really?? assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') @@ -152,6 +158,18 @@ headers.append(udir.join(name)) _copy_header_files(headers, dstdir) + if copy_numpy_headers: + try: + dstdir.mkdir('numpy') + except py.error.EEXIST: + pass + numpy_dstdir = dstdir / 'numpy' + + numpy_include_dir = include_dir / 'numpy' + numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') + _copy_header_files(numpy_headers, numpy_dstdir) + + class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -177,6 +195,61 @@ # exceptions generate a OperationError(w_SystemError); and the funtion returns # the error value specifed in the API. # +# Handling of the GIL +# ------------------- +# +# We add a global variable 'cpyext_glob_tid' that contains a thread +# id. Invariant: this variable always contain 0 when the PyPy GIL is +# released. It should also contain 0 when regular RPython code +# executes. In non-cpyext-related code, it will thus always be 0. +# +# **make_generic_cpy_call():** RPython to C, with the GIL held. Before +# the call, must assert that the global variable is 0 and set the +# current thread identifier into the global variable. After the call, +# assert that the global variable still contains the current thread id, +# and reset it to 0. +# +# **make_wrapper():** C to RPython; by default assume that the GIL is +# held, but accepts gil="acquire", "release", "around", +# "pygilstate_ensure", "pygilstate_release". +# +# When a wrapper() is called: +# +# * "acquire": assert that the GIL is not currently held, i.e. the +# global variable does not contain the current thread id (otherwise, +# deadlock!). Acquire the PyPy GIL. After we acquired it, assert +# that the global variable is 0 (it must be 0 according to the +# invariant that it was 0 immediately before we acquired the GIL, +# because the GIL was released at that point). 
+# +# * gil=None: we hold the GIL already. Assert that the current thread +# identifier is in the global variable, and replace it with 0. +# +# * "pygilstate_ensure": if the global variable contains the current +# thread id, replace it with 0 and set the extra arg to 0. Otherwise, +# do the "acquire" and set the extra arg to 1. Then we'll call +# pystate.py:PyGILState_Ensure() with this extra arg, which will do +# the rest of the logic. +# +# When a wrapper() returns, first assert that the global variable is +# still 0, and then: +# +# * "release": release the PyPy GIL. The global variable was 0 up to +# and including at the point where we released the GIL, but afterwards +# it is possible that the GIL is acquired by a different thread very +# quickly. +# +# * gil=None: we keep holding the GIL. Set the current thread +# identifier into the global variable. +# +# * "pygilstate_release": if the argument is PyGILState_UNLOCKED, +# release the PyPy GIL; otherwise, set the current thread identifier +# into the global variable. The rest of the logic of +# PyGILState_Release() should be done before, in pystate.py. 
+ +cpyext_glob_tid_ptr = lltype.malloc(rffi.CArray(lltype.Signed), 1, + flavor='raw', immortal=True, zero=True) + cpyext_namespace = NameManager('cpyext_') @@ -196,6 +269,9 @@ argnames, varargname, kwargname = pycode.cpython_code_signature(callable.func_code) assert argnames[0] == 'space' + if gil == 'pygilstate_ensure': + assert argnames[-1] == 'previous_state' + del argnames[-1] self.argnames = argnames[1:] assert len(self.argnames) == len(self.argtypes) self.gil = gil @@ -414,15 +490,14 @@ 'PyThread_acquire_lock', 'PyThread_release_lock', 'PyThread_create_key', 'PyThread_delete_key', 'PyThread_set_key_value', 'PyThread_get_key_value', 'PyThread_delete_key_value', - 'PyThread_ReInitTLS', + 'PyThread_ReInitTLS', 'PyThread_init_thread', + 'PyThread_start_new_thread', 'PyStructSequence_InitType', 'PyStructSequence_New', 'PyStructSequence_UnnamedField', 'PyFunction_Type', 'PyMethod_Type', 'PyRange_Type', 'PyTraceBack_Type', - 'PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS', '_PyArray_CopyInto', - 'Py_DebugFlag', 'Py_VerboseFlag', 'Py_InteractiveFlag', 'Py_InspectFlag', 'Py_OptimizeFlag', 'Py_NoSiteFlag', 'Py_BytesWarningFlag', 'Py_UseClassExceptionsFlag', 'Py_FrozenFlag', 'Py_TabcheckFlag', 'Py_UnicodeFlag', 'Py_IgnoreEnvironmentFlag', @@ -431,11 +506,11 @@ ] TYPES = {} GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur - '_Py_NoneStruct#': ('PyObject*', 'space.w_None'), - '_Py_TrueStruct#': ('PyIntObject*', 'space.w_True'), - '_Py_ZeroStruct#': ('PyIntObject*', 'space.w_False'), - '_Py_NotImplementedStruct#': ('PyObject*', 'space.w_NotImplemented'), - '_Py_EllipsisObject#': ('PyObject*', 'space.w_Ellipsis'), + '_Py_NoneStruct#%s' % pypy_decl: ('PyObject*', 'space.w_None'), + '_Py_TrueStruct#%s' % pypy_decl: ('PyIntObject*', 'space.w_True'), + '_Py_ZeroStruct#%s' % pypy_decl: ('PyIntObject*', 'space.w_False'), + '_Py_NotImplementedStruct#%s' % pypy_decl: ('PyObject*', 'space.w_NotImplemented'), + '_Py_EllipsisObject#%s' % 
pypy_decl: ('PyObject*', 'space.w_Ellipsis'), 'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'), } FORWARD_DECLS = [] @@ -461,6 +536,7 @@ "PyUnicode_Type": "space.w_unicode", "PyBaseString_Type": "space.w_basestring", "PyDict_Type": "space.w_dict", + "PyDictProxy_Type": "cpyext.dictobject.make_frozendict(space)", "PyTuple_Type": "space.w_tuple", "PyList_Type": "space.w_list", "PySet_Type": "space.w_set", @@ -484,7 +560,7 @@ 'PyCFunction_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)', 'PyWrapperDescr_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)' }.items(): - GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) + GLOBALS['%s#%s' % (cpyname, pypy_decl)] = ('PyTypeObject*', pypyexpr) for cpyname in '''PyMethodObject PyListObject PyLongObject PyDictObject PyClassObject'''.split(): @@ -602,7 +678,14 @@ fatal_value = callable.api_func.restype._defl() gil_acquire = (gil == "acquire" or gil == "around") gil_release = (gil == "release" or gil == "around") - assert gil is None or gil_acquire or gil_release + pygilstate_ensure = (gil == "pygilstate_ensure") + pygilstate_release = (gil == "pygilstate_release") + assert (gil is None or gil_acquire or gil_release + or pygilstate_ensure or pygilstate_release) + deadlock_error = ("GIL deadlock detected when a CPython C extension " + "module calls %r" % (callable.__name__,)) + no_gil_error = ("GIL not held when a CPython C extension " + "module calls %r" % (callable.__name__,)) @specialize.ll() def wrapper(*args): @@ -610,8 +693,27 @@ from pypy.module.cpyext.pyobject import as_pyobj # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer + + # see "Handling of the GIL" above (careful, we don't have the GIL here) + tid = rthread.get_or_make_ident() if gil_acquire: + if cpyext_glob_tid_ptr[0] == tid: + fatalerror_notb(deadlock_error) rgil.acquire() + assert cpyext_glob_tid_ptr[0] == 0 + elif pygilstate_ensure: + from 
pypy.module.cpyext import pystate + if cpyext_glob_tid_ptr[0] == tid: + cpyext_glob_tid_ptr[0] = 0 + args += (pystate.PyGILState_LOCKED,) + else: + rgil.acquire() + args += (pystate.PyGILState_UNLOCKED,) + else: + if cpyext_glob_tid_ptr[0] != tid: + fatalerror_notb(no_gil_error) + cpyext_glob_tid_ptr[0] = 0 + rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value @@ -620,7 +722,8 @@ try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, - assert len(args) == len(callable.api_func.argtypes) + assert len(args) == (len(callable.api_func.argtypes) + + pygilstate_ensure) for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if is_PyObject(typ) and is_wrapped: @@ -629,6 +732,8 @@ else: arg_conv = arg boxed_args += (arg_conv, ) + if pygilstate_ensure: + boxed_args += (args[-1], ) state = space.fromcache(State) try: result = callable(space, *boxed_args) @@ -688,8 +793,20 @@ pypy_debug_catch_fatal_exception() assert False rffi.stackcounter.stacks_counter -= 1 - if gil_release: + + # see "Handling of the GIL" above + assert cpyext_glob_tid_ptr[0] == 0 + if pygilstate_release: + from pypy.module.cpyext import pystate + arg = rffi.cast(lltype.Signed, args[-1]) + unlock = (arg == pystate.PyGILState_UNLOCKED) + else: + unlock = gil_release + if unlock: rgil.release() + else: + cpyext_glob_tid_ptr[0] = tid + return retval callable._always_inline_ = 'try' wrapper.__name__ = "wrapper for %r" % (callable, ) @@ -782,6 +899,9 @@ structindex = {} for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): + if not func: + # added only for the macro, not the decl + continue restype, args = c_function_signature(db, func) members.append('%s (*%s)(%s);' % (restype, name, args)) structindex[name] = len(structindex) @@ -798,7 +918,7 @@ global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): - if "#" in name: + if '#' in name: 
continue if typ == 'PyDateTime_CAPI*': continue @@ -822,7 +942,7 @@ '\n' + '\n'.join(functions)) - eci = build_eci(True, export_symbols, code) + eci = build_eci(True, export_symbols, code, use_micronumpy) eci = eci.compile_shared_lib( outputfilename=str(udir / "module_cache" / "pypyapi")) modulename = py.path.local(eci.libraries[-1]) @@ -834,7 +954,7 @@ ob = rawrefcount.next_dead(PyObject) if not ob: break - print ob + print 'deallocating PyObject', ob decref(space, ob) print 'dealloc_trigger DONE' return "RETRY" @@ -853,8 +973,8 @@ for name, (typ, expr) in GLOBALS.iteritems(): from pypy.module import cpyext # for the eval() below w_obj = eval(expr) - if name.endswith('#'): - name = name[:-1] + if '#' in name: + name = name.split('#')[0] isptr = False else: isptr = True @@ -899,7 +1019,7 @@ # ctypes.c_void_p) for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): - if name.startswith('cpyext_'): # XXX hack + if name.startswith('cpyext_') or func is None: # XXX hack continue pypyAPI[structindex[name]] = ctypes.cast( ll2ctypes.lltype2ctypes(func.get_llhelper(space)), @@ -952,6 +1072,8 @@ cpyext_type_init = self.cpyext_type_init self.cpyext_type_init = None for pto, w_type in cpyext_type_init: + if space.is_w(w_type, space.w_str): + pto.c_tp_itemsize = 1 finish_type_1(space, pto) finish_type_2(space, pto, w_type) @@ -969,10 +1091,14 @@ pypy_macros = [] renamed_symbols = [] for name in export_symbols: - name = name.replace("#", "") + if '#' in name: + name,header = name.split('#') + else: + header = pypy_decl newname = mangle_name(prefix, name) assert newname, name - pypy_macros.append('#define %s %s' % (name, newname)) + if header == pypy_decl: + pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) renamed_symbols.append(newname) @@ -1001,7 +1127,7 @@ # implement function callbacks and generate function decls functions = [] 
decls = {} - pypy_decls = decls['pypy_decl.h'] = [] + pypy_decls = decls[pypy_decl] = [] pypy_decls.append('#define Signed long /* xxx temporary fix */\n') pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') @@ -1017,6 +1143,8 @@ header = decls[header_name] for name, func in sorted(header_functions.iteritems()): + if not func: + continue if header == DEFAULT_HEADER: _name = name else: @@ -1042,12 +1170,15 @@ functions.append(header + '\n{return va_arg(*vp, %s);}\n' % name) for name, (typ, expr) in GLOBALS.iteritems(): - if name.endswith('#'): - name = name.replace("#", "") + if '#' in name: + name, header = name.split("#") typ = typ.replace("*", "") elif name.startswith('PyExc_'): typ = 'PyObject*' - pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) + header = pypy_decl + if header != pypy_decl: + decls[header].append('#define %s %s' % (name, mangle_name(prefix, name))) + decls[header].append('PyAPI_DATA(%s) %s;' % (typ, name)) for header_name in FUNCTIONS_BY_HEADER.keys(): header = decls[header_name] @@ -1075,9 +1206,10 @@ source_dir / "pysignals.c", source_dir / "pythread.c", source_dir / "missing.c", + source_dir / "pymem.c", ] -def build_eci(building_bridge, export_symbols, code): +def build_eci(building_bridge, export_symbols, code, use_micronumpy=False): "NOT_RPYTHON" # Build code and get pointer to the structure kwds = {} @@ -1099,9 +1231,11 @@ # Generate definitions for global structures structs = ["#include "] + if use_micronumpy: + structs.append('#include /* api.py line 1223 */') for name, (typ, expr) in GLOBALS.iteritems(): - if name.endswith('#'): - structs.append('%s %s;' % (typ[:-1], name[:-1])) + if '#' in name: + structs.append('%s %s;' % (typ[:-1], name.split('#')[0])) elif name.startswith('PyExc_'): structs.append('PyTypeObject _%s;' % (name,)) structs.append('PyObject* %s = (PyObject*)&_%s;' % (name, name)) @@ -1142,11 +1276,12 @@ use_micronumpy = space.config.objspace.usemodules.micronumpy if not use_micronumpy: 
return use_micronumpy - # import to register api functions by side-effect - import pypy.module.cpyext.ndarrayobject - global GLOBALS, SYMBOLS_C, separate_module_files - GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") - SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS'] + # import registers api functions by side-effect, we also need HEADER + from pypy.module.cpyext.ndarrayobject import HEADER + global GLOBALS, FUNCTIONS_BY_HEADER, separate_module_files + for func_name in ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS']: + FUNCTIONS_BY_HEADER.setdefault(HEADER, {})[func_name] = None + GLOBALS["PyArray_Type#%s" % HEADER] = ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") separate_module_files.append(source_dir / "ndarrayobject.c") return use_micronumpy @@ -1156,14 +1291,18 @@ export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() + prefix = 'PyPy' - generate_macros(export_symbols, prefix='PyPy') + generate_macros(export_symbols, prefix=prefix) functions = generate_decls_and_callbacks(db, [], api_struct=False, - prefix='PyPy') - code = "#include \n" + "\n".join(functions) + prefix=prefix) + code = "#include \n" + if use_micronumpy: + code += "#include /* api.py line 1290 */" + code += "\n".join(functions) - eci = build_eci(False, export_symbols, code) + eci = build_eci(False, export_symbols, code, use_micronumpy) space.fromcache(State).install_dll(eci) @@ -1175,9 +1314,14 @@ lines = ['PyObject *pypy_static_pyobjs[] = {\n'] include_lines = ['RPY_EXTERN PyObject *pypy_static_pyobjs[];\n'] for name, (typ, expr) in sorted(GLOBALS.items()): - if name.endswith('#'): + if '#' in name: + name, header = name.split('#') assert typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*') - typ, name = typ[:-1], name[:-1] + typ = typ[:-1] + if header != pypy_decl: + # since the #define is not in pypy_macros, 
do it here + mname = mangle_name(prefix, name) + include_lines.append('#define %s %s\n' % (name, mname)) elif name.startswith('PyExc_'): typ = 'PyTypeObject' name = '_' + name @@ -1204,6 +1348,8 @@ for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): + if not func: + continue newname = mangle_name('PyPy', name) or name deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) @@ -1211,7 +1357,7 @@ setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include) + copy_header_files(trunk_include, use_micronumpy) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) @@ -1348,10 +1494,17 @@ arg = as_pyobj(space, arg) boxed_args += (arg,) + # see "Handling of the GIL" above + tid = rthread.get_ident() + assert cpyext_glob_tid_ptr[0] == 0 + cpyext_glob_tid_ptr[0] = tid + try: # Call the function result = call_external_function(func, *boxed_args) finally: + assert cpyext_glob_tid_ptr[0] == tid + cpyext_glob_tid_ptr[0] = 0 keepalive_until_here(*keepalives) if is_PyObject(RESULT_TYPE): diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -2,11 +2,11 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, - PyObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) + PyObjectFields, PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) from pypy.module.cpyext.pyerrors import PyErr_BadArgument from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, - make_typedescr, get_typedescr) + make_typedescr, get_typedescr, as_pyobj, Py_IncRef) ## ## Implementation of PyStringObject @@ -27,7 +27,7 @@ ## Solution ## -------- ## -## PyStringObject 
contains two additional members: the size and a pointer to a +## PyStringObject contains two additional members: the ob_size and a pointer to a ## char buffer; it may be NULL. ## ## - A string allocated by pypy will be converted into a PyStringObject with a @@ -36,7 +36,7 @@ ## ## - A string allocated with PyString_FromStringAndSize(NULL, size) will ## allocate a PyStringObject structure, and a buffer with the specified -## size, but the reference won't be stored in the global map; there is no +## size+1, but the reference won't be stored in the global map; there is no ## corresponding object in pypy. When from_ref() or Py_INCREF() is called, ## the pypy string is created, and added to the global map of tracked ## objects. The buffer is then supposed to be immutable. @@ -52,8 +52,8 @@ PyStringObjectStruct = lltype.ForwardReference() PyStringObject = lltype.Ptr(PyStringObjectStruct) -PyStringObjectFields = PyObjectFields + \ - (("buffer", rffi.CCHARP), ("size", Py_ssize_t)) +PyStringObjectFields = PyVarObjectFields + \ + (("ob_shash", rffi.LONG), ("ob_sstate", rffi.INT), ("buffer", rffi.CCHARP)) cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct) @bootstrap_function @@ -78,10 +78,11 @@ py_str = rffi.cast(PyStringObject, py_obj) buflen = length + 1 - py_str.c_size = length + py_str.c_ob_size = length py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw', zero=True, add_memory_pressure=True) + py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED return py_str def string_attach(space, py_obj, w_obj): @@ -90,8 +91,10 @@ buffer must not be modified. 
""" py_str = rffi.cast(PyStringObject, py_obj) - py_str.c_size = len(space.str_w(w_obj)) + py_str.c_ob_size = len(space.str_w(w_obj)) py_str.c_buffer = lltype.nullptr(rffi.CCHARP.TO) + py_str.c_ob_shash = space.hash_w(w_obj) + py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL def string_realize(space, py_obj): """ @@ -99,8 +102,13 @@ be modified after this call. """ py_str = rffi.cast(PyStringObject, py_obj) - s = rffi.charpsize2str(py_str.c_buffer, py_str.c_size) + if not py_str.c_buffer: + py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, py_str.c_ob_size + 1, + flavor='raw', zero=True) + s = rffi.charpsize2str(py_str.c_buffer, py_str.c_ob_size) w_obj = space.wrap(s) + py_str.c_ob_shash = space.hash_w(w_obj) + py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL track_reference(space, py_obj, w_obj) return w_obj @@ -169,12 +177,12 @@ ref_str.c_buffer = rffi.str2charp(s) buffer[0] = ref_str.c_buffer if length: - length[0] = ref_str.c_size + length[0] = ref_str.c_ob_size else: i = 0 while ref_str.c_buffer[i] != '\0': i += 1 - if i != ref_str.c_size: + if i != ref_str.c_ob_size: raise OperationError(space.w_TypeError, space.wrap( "expected string without null bytes")) return 0 @@ -183,7 +191,7 @@ def PyString_Size(space, ref): if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: ref = rffi.cast(PyStringObject, ref) - return ref.c_size + return ref.c_ob_size else: w_obj = from_ref(space, ref) return space.len_w(w_obj) @@ -212,7 +220,7 @@ ref[0] = lltype.nullptr(PyObject.TO) raise to_cp = newsize - oldsize = py_str.c_size + oldsize = py_str.c_ob_size if oldsize < newsize: to_cp = oldsize for i in range(to_cp): @@ -236,15 +244,16 @@ if not ref[0]: return - if w_newpart is None or not PyString_Check(space, ref[0]) or \ - not PyString_Check(space, w_newpart): + if w_newpart is None or not PyString_Check(space, ref[0]) or not \ + (space.isinstance_w(w_newpart, space.w_str) or + space.isinstance_w(w_newpart, 
space.w_unicode)): Py_DecRef(space, ref[0]) ref[0] = lltype.nullptr(PyObject.TO) return w_str = from_ref(space, ref[0]) w_newstr = space.add(w_str, w_newpart) - Py_DecRef(space, ref[0]) ref[0] = make_ref(space, w_newstr) + Py_IncRef(space, ref[0]) @cpython_api([PyObjectP, PyObject], lltype.Void) def PyString_ConcatAndDel(space, ref, newpart): diff --git a/pypy/module/cpyext/cdatetime.py b/pypy/module/cpyext/cdatetime.py --- a/pypy/module/cpyext/cdatetime.py +++ b/pypy/module/cpyext/cdatetime.py @@ -15,6 +15,7 @@ ('DateTimeType', PyTypeObjectPtr), ('TimeType', PyTypeObjectPtr), ('DeltaType', PyTypeObjectPtr), + ('TZInfoType', PyTypeObjectPtr), )) @cpython_api([], lltype.Ptr(PyDateTime_CAPI)) @@ -40,11 +41,21 @@ datetimeAPI.c_DeltaType = rffi.cast( PyTypeObjectPtr, make_ref(space, w_type)) + w_type = space.getattr(w_datetime, space.wrap("tzinfo")) + datetimeAPI.c_TZInfoType = rffi.cast( + PyTypeObjectPtr, make_ref(space, w_type)) + return datetimeAPI -PyDateTime_Date = PyObject -PyDateTime_Time = PyObject -PyDateTime_DateTime = PyObject +PyDateTime_DateStruct = lltype.ForwardReference() +PyDateTime_TimeStruct = lltype.ForwardReference() +PyDateTime_DateTimeStruct = lltype.ForwardReference() +cpython_struct("PyDateTime_Date", PyObjectFields, PyDateTime_DateStruct) +PyDateTime_Date = lltype.Ptr(PyDateTime_DateStruct) +cpython_struct("PyDateTime_Time", PyObjectFields, PyDateTime_TimeStruct) +PyDateTime_Time = lltype.Ptr(PyDateTime_TimeStruct) +cpython_struct("PyDateTime_DateTime", PyObjectFields, PyDateTime_DateTimeStruct) +PyDateTime_DateTime = lltype.Ptr(PyDateTime_DateTimeStruct) PyDeltaObjectStruct = lltype.ForwardReference() cpython_struct("PyDateTime_Delta", PyObjectFields, PyDeltaObjectStruct) @@ -81,6 +92,7 @@ make_check_function("PyDate_Check", "date") make_check_function("PyTime_Check", "time") make_check_function("PyDelta_Check", "timedelta") +make_check_function("PyTZInfo_Check", "tzinfo") # Constructors diff --git a/pypy/module/cpyext/complexobject.py 
b/pypy/module/cpyext/complexobject.py --- a/pypy/module/cpyext/complexobject.py +++ b/pypy/module/cpyext/complexobject.py @@ -1,16 +1,51 @@ from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.cpyext.api import ( +from pypy.module.cpyext.api import (PyObjectFields, bootstrap_function, cpython_api, cpython_struct, PyObject, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + make_typedescr, track_reference, from_ref) from pypy.module.cpyext.floatobject import PyFloat_AsDouble from pypy.objspace.std.complexobject import W_ComplexObject from pypy.interpreter.error import OperationError PyComplex_Check, PyComplex_CheckExact = build_type_checkers("Complex") -Py_complex_t = lltype.ForwardReference() +Py_complex_t = rffi.CStruct('Py_complex_t', + ('real', rffi.DOUBLE), + ('imag', rffi.DOUBLE), + hints={'size': 2 * rffi.sizeof(rffi.DOUBLE)}) Py_complex_ptr = lltype.Ptr(Py_complex_t) -Py_complex_fields = (("real", rffi.DOUBLE), ("imag", rffi.DOUBLE)) -cpython_struct("Py_complex", Py_complex_fields, Py_complex_t) + +PyComplexObjectStruct = lltype.ForwardReference() +PyComplexObject = lltype.Ptr(PyComplexObjectStruct) +PyComplexObjectFields = PyObjectFields + \ + (("cval", Py_complex_t),) +cpython_struct("PyComplexObject", PyComplexObjectFields, PyComplexObjectStruct) + + at bootstrap_function +def init_complexobject(space): + "Type description of PyComplexObject" + make_typedescr(space.w_complex.layout.typedef, + basestruct=PyComplexObject.TO, + attach=complex_attach, + realize=complex_realize) + +def complex_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyComplexObject with the given complex object. The + value must not be modified. 
+ """ + assert isinstance(w_obj, W_ComplexObject) + py_obj = rffi.cast(PyComplexObject, py_obj) + py_obj.c_cval.c_real = w_obj.realval + py_obj.c_cval.c_imag = w_obj.imagval + +def complex_realize(space, obj): + py_obj = rffi.cast(PyComplexObject, obj) + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(W_ComplexObject, w_type) + w_obj.__init__(py_obj.c_cval.c_real, py_obj.c_cval.c_imag) + track_reference(space, obj, w_obj) + return w_obj @cpython_api([lltype.Float, lltype.Float], PyObject) diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -23,6 +23,7 @@ # NOTE: this works so far because all our dict strategies store # *values* as full objects, which stay alive as long as the dict is # alive and not modified. So we can return a borrowed ref. + # XXX this is wrong with IntMutableCell. Hope it works... return w_res @cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1) @@ -62,6 +63,7 @@ # NOTE: this works so far because all our dict strategies store # *values* as full objects, which stay alive as long as the dict is # alive and not modified. So we can return a borrowed ref. + # XXX this is wrong with IntMutableCell. Hope it works... return w_res @cpython_api([PyObject, CONST_STRING], rffi.INT_real, error=-1) @@ -104,6 +106,32 @@ """ return space.call_method(space.w_dict, "copy", w_obj) +def _has_val(space, w_dict, w_key): + try: + w_val = space.getitem(w_dict, w_key) + except OperationError as e: + if e.match(space, space.w_KeyError): + return False + else: + raise + return True + + at cpython_api([PyObject, PyObject, rffi.INT_real], rffi.INT_real, error=-1) +def PyDict_Merge(space, w_a, w_b, override): + """Iterate over mapping object b adding key-value pairs to dictionary a. 
From pypy.commits at gmail.com Thu Apr 28 18:09:41 2016 From: pypy.commits at gmail.com (devin.jeanpierre) Date: Thu, 28 Apr 2016 15:09:41 -0700 (PDT) Subject: [pypy-commit] pypy default: Undo 0f0ab7f / e648ab7 I think/hope. Message-ID: <57228a25.8a9d1c0a.b7bc2.137d@mx.google.com> Author: Devin Jeanpierre Branch: Changeset: r84018:db306c552216 Date: 2016-04-28 15:07 -0700 http://bitbucket.org/pypy/pypy/changeset/db306c552216/ Log: Undo 0f0ab7f / e648ab7 I think/hope. diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -239,7 +239,9 @@ gufunctype = lltype.Ptr(ufuncs.GenericUfunc) - at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, +# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, is there +# a problem with casting function pointers? + at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, rffi.CCHARP], PyObject, header=HEADER) def PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, @@ -247,18 +249,14 @@ w_signature = rffi.charp2str(signature) return do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return, w_signature) - + def do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return, w_signature): funcs_w = [None] * ntypes dtypes_w = [None] * ntypes * (nin + nout) - # XXX For some reason funcs[i] segfaults, but this does not: - # cast(gufunctype, cast(CArrayPtr(CArrayPtr(gufunctype)), funcs)[i]) - # Something is very wrong here. 
- funcs_wrong_type = rffi.cast(rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), funcs) for i in range(ntypes): - funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs_wrong_type[i]), data) + funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]), data) for i in range(ntypes*(nin+nout)): dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] w_funcs = space.newlist(funcs_w) @@ -270,7 +268,7 @@ w_signature, w_identity, w_name, w_doc, stack_inputs=True) return ufunc_generic - at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, + at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t], PyObject, header=HEADER) def PyUFunc_FromFuncAndData(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return): diff --git a/pypy/module/cpyext/test/test_borrow.py b/pypy/module/cpyext/test/test_borrow.py --- a/pypy/module/cpyext/test/test_borrow.py +++ b/pypy/module/cpyext/test/test_borrow.py @@ -12,13 +12,13 @@ PyObject *t = PyTuple_New(1); PyObject *f = PyFloat_FromDouble(42.0); PyObject *g = NULL; - printf("Refcnt1: %zd\\n", f->ob_refcnt); + printf("Refcnt1: %i\\n", f->ob_refcnt); PyTuple_SetItem(t, 0, f); // steals reference - printf("Refcnt2: %zd\\n", f->ob_refcnt); + printf("Refcnt2: %i\\n", f->ob_refcnt); f = PyTuple_GetItem(t, 0); // borrows reference - printf("Refcnt3: %zd\\n", f->ob_refcnt); + printf("Refcnt3: %i\\n", f->ob_refcnt); g = PyTuple_GetItem(t, 0); // borrows reference again - printf("Refcnt4: %zd\\n", f->ob_refcnt); + printf("Refcnt4: %i\\n", f->ob_refcnt); printf("COMPARE: %i\\n", f == g); fflush(stdout); Py_DECREF(t); diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -161,10 +161,7 @@ module = 
self.import_extension('foo', [ ("string_None", "METH_VARARGS", ''' - if (PyString_AsString(Py_None)) { - Py_RETURN_NONE; - } - return NULL; + return PyString_AsString(Py_None); ''' )]) raises(TypeError, module.string_None) diff --git a/pypy/module/cpyext/test/test_classobject.py b/pypy/module/cpyext/test/test_classobject.py --- a/pypy/module/cpyext/test/test_classobject.py +++ b/pypy/module/cpyext/test/test_classobject.py @@ -29,6 +29,7 @@ assert space.unwrap(space.getattr(w_instance, space.wrap('x'))) == 1 assert space.unwrap(space.getattr(w_instance, space.wrap('y'))) == 2 assert space.unwrap(space.getattr(w_instance, space.wrap('args'))) == (3,) + def test_lookup(self, space, api): w_instance = space.appexec([], """(): @@ -67,7 +68,7 @@ ("get_classtype", "METH_NOARGS", """ Py_INCREF(&PyClass_Type); - return (PyObject*)&PyClass_Type; + return &PyClass_Type; """)]) class C: pass assert module.get_classtype() is type(C) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -72,7 +72,8 @@ else: kwds["link_files"] = [str(api_library + '.so')] if sys.platform.startswith('linux'): - kwds["compile_extra"]=["-Werror", "-g", "-O0"] + kwds["compile_extra"]=["-Werror=implicit-function-declaration", + "-g", "-O0"] kwds["link_extra"]=["-g"] modname = modname.split('.')[-1] @@ -746,7 +747,7 @@ refcnt_after = true_obj->ob_refcnt; Py_DECREF(true_obj); Py_DECREF(true_obj); - fprintf(stderr, "REFCNT %zd %zd\\n", refcnt, refcnt_after); + fprintf(stderr, "REFCNT %i %i\\n", refcnt, refcnt_after); return PyBool_FromLong(refcnt_after == refcnt + 2); } static PyObject* foo_bar(PyObject* self, PyObject *args) @@ -762,7 +763,7 @@ return NULL; refcnt_after = true_obj->ob_refcnt; Py_DECREF(tup); - fprintf(stderr, "REFCNT2 %zd %zd %zd\\n", refcnt, refcnt_after, + fprintf(stderr, "REFCNT2 %i %i %i\\n", refcnt, refcnt_after, true_obj->ob_refcnt); return 
PyBool_FromLong(refcnt_after == refcnt + 1 && refcnt == true_obj->ob_refcnt); diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -171,7 +171,7 @@ int little_endian, is_signed; if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) return NULL; - return _PyLong_FromByteArray((unsigned char*)"\x9A\xBC", 2, + return _PyLong_FromByteArray("\x9A\xBC", 2, little_endian, is_signed); """), ]) @@ -187,7 +187,7 @@ int little_endian, is_signed; if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) return NULL; - return _PyLong_FromByteArray((unsigned char*)"\x9A\xBC\x41", 3, + return _PyLong_FromByteArray("\x9A\xBC\x41", 3, little_endian, is_signed); """), ]) diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -168,14 +168,14 @@ PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; - if ((PyObject*)Py_TYPE(val) != PyExc_TypeError) + if (val->ob_type != PyExc_TypeError) Py_RETURN_FALSE; /* Normalize again */ PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; - if ((PyObject*)Py_TYPE(val) != PyExc_TypeError) + if (val->ob_type != PyExc_TypeError) Py_RETURN_FALSE; PyErr_Restore(type, val, tb); diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -437,14 +437,14 @@ ("test_tp_getattro", "METH_VARARGS", ''' PyObject *name, *obj = PyTuple_GET_ITEM(args, 0); - PyIntObject *attr, *value = (PyIntObject*) PyTuple_GET_ITEM(args, 1); + PyIntObject *attr, *value = PyTuple_GET_ITEM(args, 1); if (!obj->ob_type->tp_getattro) { PyErr_SetString(PyExc_ValueError, "missing 
tp_getattro"); return NULL; } name = PyString_FromString("attr1"); - attr = (PyIntObject*) obj->ob_type->tp_getattro(obj, name); + attr = obj->ob_type->tp_getattro(obj, name); if (attr->ob_ival != value->ob_ival) { PyErr_SetString(PyExc_ValueError, @@ -454,7 +454,7 @@ Py_DECREF(name); Py_DECREF(attr); name = PyString_FromString("attr2"); - attr = (PyIntObject*) obj->ob_type->tp_getattro(obj, name); + attr = obj->ob_type->tp_getattro(obj, name); if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); @@ -758,9 +758,8 @@ } IntLikeObject; static int - intlike_nb_nonzero(PyObject *o) + intlike_nb_nonzero(IntLikeObject *v) { - IntLikeObject *v = (IntLikeObject*)o; if (v->value == -42) { PyErr_SetNone(PyExc_ValueError); return -1; From pypy.commits at gmail.com Thu Apr 28 23:08:00 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 28 Apr 2016 20:08:00 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: fix Message-ID: <5722d010.08851c0a.27447.4b3e@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r84019:85b62252689f Date: 2016-04-29 04:07 +0100 http://bitbucket.org/pypy/pypy/changeset/85b62252689f/ Log: fix diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -321,10 +321,6 @@ code = """ %(PY_SSIZE_T_CLEAN)s #include - /* fix for cpython 2.7 Python.h if running tests with -A - since pypy compiles with -fvisibility-hidden */ - #undef PyMODINIT_FUNC - #define PyMODINIT_FUNC RPY_EXPORTED void %(body)s From pypy.commits at gmail.com Fri Apr 29 02:37:47 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 28 Apr 2016 23:37:47 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: updated the architectures on features.html and regenerated them Message-ID: <5723013b.d1981c0a.f1fc1.7b41@mx.google.com> Author: Richard Plangger Branch: extradoc Changeset: r737:7da22d5ebd79 Date: 2016-04-29 08:37 
+0200 http://bitbucket.org/pypy/pypy.org/changeset/7da22d5ebd79/ Log: updated the architectures on features.html and regenerated them diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -111,15 +111,15 @@ not support refcounting semantics. The following code won't fill the file immediately, but only after a certain period of time, when the GC does a collection:

    -
    open("filename", "w").write("stuff")
    +
    open("filename", "w").write("stuff")

    The proper fix is

    -
    f = open("filename", "w")
    f.write("stuff")
    f.close()
    +
    f = open("filename", "w")
    f.write("stuff")
    f.close()

    or using the with keyword

    -
    with open("filename", "w") as f:
    f.write("stuff")
    +
    with open("filename", "w") as f:
    f.write("stuff")

    Similarly, remember that you must close() a non-exhausted generator in order to have its pending finally or with clauses executed immediately:

    -
    def mygen():
    with foo:
    yield 42

    for x in mygen():
    if x == 42:
    break # foo.__exit__ is not run immediately!

    # fixed version:
    gen = mygen()
    try:
    for x in gen:
    if x == 42:
    break
    finally:
    gen.close()
    +
    def mygen():
    with foo:
    yield 42

    for x in mygen():
    if x == 42:
    break # foo.__exit__ is not run immediately!

    # fixed version:
    gen = mygen()
    try:
    for x in gen:
    if x == 42:
    break
    finally:
    gen.close()

    More generally, __del__() methods are not executed as predictively as on CPython: they run “some time later” in PyPy (or not at all if the program finishes running in the meantime). See more details diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -72,13 +72,20 @@

    PyPy is a replacement for CPython. It is built using the RPython language that was co-developed with it. The main reason to use it instead of CPython is speed: it runs generally faster (see next section).

    -

    PyPy implements Python 2.7.10 and runs on Intel -x86 (IA-32) , x86_64 and ARM platforms, with PPC being -stalled. It supports all of the core language, passing the Python test suite +

    PyPy implements Python 2.7.10. +It supports all of the core language, passing the Python test suite (with minor modifications that were already accepted in the main python in newer versions). It supports most of the commonly used Python standard library modules. For known differences with CPython, see our compatibility page.

    +

    The following CPU architectures are supported and maintained:

    +

    If you are interested in helping to move forward, see our howtohelp page.

    We also have a beta release of PyPy3 which implements Python 3.2.5. It runs on the same platforms as above.

    diff --git a/source/features.txt b/source/features.txt --- a/source/features.txt +++ b/source/features.txt @@ -10,14 +10,22 @@ language that was co-developed with it. The main reason to use it instead of CPython is speed: it runs generally faster (see next section). -**PyPy** implements **Python 2.7.10** and runs on Intel -`x86 (IA-32)`_ , `x86_64`_ and `ARM`_ platforms, with PPC being -stalled. It supports all of the core language, passing the Python test suite +**PyPy** implements **Python 2.7.10**. +It supports all of the core language, passing the Python test suite (with minor modifications that were already accepted in the main python in newer versions). It supports most of the commonly used Python standard library modules. For known differences with CPython, see our `compatibility`_ page. +The following CPU architectures are supported and maintained: + +* `x86 (IA-32)`_ +* `x86_64`_ +* `ARM`_ platforms +* `PPC`_ 64bit both little and big endian +* `s390x`_ running on Linux + + If you are interested in helping to move forward, see our `howtohelp`_ page. We also have a beta release of **PyPy3** which implements **Python 3.2.5**. @@ -27,6 +35,8 @@ .. _`x86 (IA-32)`: http://en.wikipedia.org/wiki/IA-32 .. _`x86_64`: http://en.wikipedia.org/wiki/X86_64 .. _`ARM`: http://en.wikipedia.org/wiki/ARM +.. _`PPC`: https://de.wikipedia.org/wiki/PowerPC +.. _`s390x`: https://de.wikipedia.org/wiki/System/390 .. 
_`howtohelp`: howtohelp.html From pypy.commits at gmail.com Fri Apr 29 02:48:58 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 28 Apr 2016 23:48:58 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: some clarification and added operating systems on which they run (regenerated the page as well) Message-ID: <572303da.952f1c0a.a2869.7e0c@mx.google.com> Author: Richard Plangger Branch: extradoc Changeset: r738:53b0a434ba24 Date: 2016-04-29 08:48 +0200 http://bitbucket.org/pypy/pypy.org/changeset/53b0a434ba24/ Log: some clarification and added operating systems on which they run (regenerated the page as well) diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -83,9 +83,10 @@
  • x86 (IA-32)
  • x86_64
  • ARM platforms
  • -
  • PPC 64bit both little and big endian
  • -
  • s390x running on Linux
  • +
  • PowerPC 64bit both little and big endian
  • +
  • System Z (s390x)
  • +

    The x86 runs on several platforms such as Linux, MacOS X, Windows, FreeBSD. All others are only supported on Linux.

    If you are interested in helping to move forward, see our howtohelp page.

    We also have a beta release of PyPy3 which implements Python 3.2.5. It runs on the same platforms as above.

    diff --git a/source/features.txt b/source/features.txt --- a/source/features.txt +++ b/source/features.txt @@ -21,10 +21,12 @@ * `x86 (IA-32)`_ * `x86_64`_ -* `ARM`_ platforms -* `PPC`_ 64bit both little and big endian -* `s390x`_ running on Linux +* `ARM`_ platforms (ARMv6 or ARMv7, with VFPv3) +* `PowerPC`_ 64bit both little and big endian +* `System Z (s390x)`_ +PyPy's x86 version runs on several operating systems such on Linux 32/64, Mac OS X 64, Windows, OpenBSD, freebsd. +All others are only supported on Linux. If you are interested in helping to move forward, see our `howtohelp`_ page. @@ -35,8 +37,8 @@ .. _`x86 (IA-32)`: http://en.wikipedia.org/wiki/IA-32 .. _`x86_64`: http://en.wikipedia.org/wiki/X86_64 .. _`ARM`: http://en.wikipedia.org/wiki/ARM -.. _`PPC`: https://de.wikipedia.org/wiki/PowerPC -.. _`s390x`: https://de.wikipedia.org/wiki/System/390 +.. _`PowerPC`: https://de.wikipedia.org/wiki/PowerPC +.. _`System Z (s390x)`: https://de.wikipedia.org/wiki/System/390 .. _`howtohelp`: howtohelp.html From pypy.commits at gmail.com Fri Apr 29 02:58:02 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 28 Apr 2016 23:58:02 -0700 (PDT) Subject: [pypy-commit] pypy default: updated the FAQ entry (cpu architectures supported) Message-ID: <572305fa.e873c20a.f4bb.6da8@mx.google.com> Author: Richard Plangger Branch: Changeset: r84020:9806eaad6bd9 Date: 2016-04-29 08:57 +0200 http://bitbucket.org/pypy/pypy/changeset/9806eaad6bd9/ Log: updated the FAQ entry (cpu architectures supported) diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -117,13 +117,22 @@ On which platforms does PyPy run? --------------------------------- -PyPy is regularly and extensively tested on Linux machines. 
It mostly +PyPy currently supports: + + * **x86** machines on most common operating systems + (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s390x** running Linux + +PyPy is regularly and extensively tested on Linux machines. It works on Mac and Windows: it is tested there, but most of us are running -Linux so fixes may depend on 3rd-party contributions. PyPy's JIT -works on x86 (32-bit or 64-bit) and on ARM (ARMv6 or ARMv7). -Support for POWER (64-bit) is stalled at the moment. +Linux so fixes may depend on 3rd-party contributions. -To bootstrap from sources, PyPy can use either CPython (2.6 or 2.7) or +To bootstrap from sources, PyPy can use either CPython 2.7 or another (e.g. older) PyPy. Cross-translation is not really supported: e.g. to build a 32-bit PyPy, you need to have a 32-bit environment. Cross-translation is only explicitly supported between a 32-bit Intel From pypy.commits at gmail.com Fri Apr 29 03:05:31 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 29 Apr 2016 00:05:31 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: x86 on one line, regenerated source Message-ID: <572307bb.a1ccc20a.fd2a9.6223@mx.google.com> Author: Richard Plangger Branch: extradoc Changeset: r739:f5505f8ce414 Date: 2016-04-29 09:05 +0200 http://bitbucket.org/pypy/pypy.org/changeset/f5505f8ce414/ Log: x86 on one line, regenerated source diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -80,13 +80,13 @@ compatibility page.

    The following CPU architectures are supported and maintained:

    -

    The x86 runs on several platforms such as Linux, MacOS X, Windows, FreeBSD. All others are only supported on Linux.

    +

    PyPy's x86 version runs on several operating systems such on Linux 32/64, Mac OS X 64, Windows, OpenBSD, freebsd. +All others are only supported on Linux.

    If you are interested in helping to move forward, see our howtohelp page.

    We also have a beta release of PyPy3 which implements Python 3.2.5. It runs on the same platforms as above.

    diff --git a/source/features.txt b/source/features.txt --- a/source/features.txt +++ b/source/features.txt @@ -19,8 +19,7 @@ The following CPU architectures are supported and maintained: -* `x86 (IA-32)`_ -* `x86_64`_ +* `x86 (IA-32)`_ and `x86_64`_ * `ARM`_ platforms (ARMv6 or ARMv7, with VFPv3) * `PowerPC`_ 64bit both little and big endian * `System Z (s390x)`_ From pypy.commits at gmail.com Fri Apr 29 04:18:46 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 29 Apr 2016 01:18:46 -0700 (PDT) Subject: [pypy-commit] pypy default: remove the weird _become method Message-ID: <572318e6.a9a1c20a.2eb94.ffff8199@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r84021:2a50600573cb Date: 2016-04-28 19:19 +0300 http://bitbucket.org/pypy/pypy/changeset/2a50600573cb/ Log: remove the weird _become method diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -417,11 +417,6 @@ def __repr__(self): return "" % (self.name, self.index, self.storageindex, self.back) -def _become(w_obj, new_obj): - # this is like the _become method, really, but we cannot use that due to - # RPython reasons - w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) - class MapAttrCache(object): def __init__(self, space): SIZE = 1 << space.config.objspace.std.methodcachesizeexp @@ -464,9 +459,6 @@ # _____________________________________________ # methods needed for mapdict - def _become(self, new_obj): - self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) - def _get_mapdict_map(self): return jit.promote(self.map) def _set_mapdict_map(self, map): @@ -482,7 +474,7 @@ def setclass(self, space, w_cls): new_obj = self._get_mapdict_map().set_terminator(self, w_cls.terminator) - self._become(new_obj) + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) def user_setup(self, space, w_subtype): from pypy.module.__builtin__.interp_classobj import W_InstanceObject @@ 
-508,7 +500,7 @@ new_obj = self._get_mapdict_map().delete(self, "slot", index) if new_obj is None: return False - self._become(new_obj) + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) return True @@ -549,7 +541,7 @@ new_obj = self._get_mapdict_map().delete(self, attrname, DICT) if new_obj is None: return False - self._become(new_obj) + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) return True def getdict(self, space): @@ -811,7 +803,7 @@ def clear(self, w_dict): w_obj = self.unerase(w_dict.dstorage) new_obj = w_obj._get_mapdict_map().remove_dict_entries(w_obj) - _become(w_obj, new_obj) + w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) def popitem(self, w_dict): curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT) @@ -836,7 +828,7 @@ def materialize_r_dict(space, obj, dict_w): map = obj._get_mapdict_map() new_obj = map.materialize_r_dict(space, obj, dict_w) - _become(obj, new_obj) + obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) class MapDictIteratorKeys(BaseKeyIterator): def __init__(self, space, strategy, dictimplementation): From pypy.commits at gmail.com Fri Apr 29 04:18:52 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 29 Apr 2016 01:18:52 -0700 (PDT) Subject: [pypy-commit] pypy share-mapdict-methods-2: don't inline 5 fields into everything, just into old-style classes and Message-ID: <572318ec.45bd1c0a.7f058.ffffa3fc@mx.google.com> Author: Carl Friedrich Bolz Branch: share-mapdict-methods-2 Changeset: r84024:5a2610668526 Date: 2016-04-28 23:14 +0300 http://bitbucket.org/pypy/pypy/changeset/5a2610668526/ Log: don't inline 5 fields into everything, just into old-style classes and subclasses of object diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -362,6 +362,26 @@ """) assert seen == [1] + def test_mapdict_number_of_slots(self): + space 
= self.space + a, b, c = space.unpackiterable(space.appexec([], """(): + class A(object): + pass + a = A() + a.x = 1 + class B: + pass + b = B() + b.x = 1 + class C(int): + pass + c = C(1) + c.x = 1 + return a, b, c + """), 3) + assert not hasattr(a, "storage") + assert not hasattr(b, "storage") + assert hasattr(c, "storage") class AppTestTypeDef: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -124,13 +124,19 @@ def _getusercls(space, cls, wants_del, reallywantdict=False): from rpython.rlib import objectmodel + from pypy.objspace.std.objectobject import W_ObjectObject + from pypy.module.__builtin__.interp_classobj import W_InstanceObject from pypy.objspace.std.mapdict import (BaseUserClassMapdict, MapdictDictSupport, MapdictWeakrefSupport, - _make_storage_mixin_size_n) + _make_storage_mixin_size_n, MapdictStorageMixin) typedef = cls.typedef name = cls.__name__ + "User" - mixins_needed = [_make_storage_mixin_size_n()] + mixins_needed = [] + if cls is W_ObjectObject or cls is W_InstanceObject: + mixins_needed.append(_make_storage_mixin_size_n()) + else: + mixins_needed.append(MapdictStorageMixin) copy_methods = [BaseUserClassMapdict] if reallywantdict or not typedef.hasdict: # the type has no dict, mapdict to provide the dict From pypy.commits at gmail.com Fri Apr 29 04:18:53 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 29 Apr 2016 01:18:53 -0700 (PDT) Subject: [pypy-commit] pypy share-mapdict-methods-2: fix problems with __del__: only make a single RPython subclass if Message-ID: <572318ed.08851c0a.27447.ffffa347@mx.google.com> Author: Carl Friedrich Bolz Branch: share-mapdict-methods-2 Changeset: r84025:1cb2c3897dbb Date: 2016-04-28 23:24 +0300 http://bitbucket.org/pypy/pypy/changeset/1cb2c3897dbb/ Log: fix problems with __del__: only make a single RPython subclass if the base class already has a del diff --git a/pypy/interpreter/test/test_typedef.py 
b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -383,6 +383,25 @@ assert not hasattr(b, "storage") assert hasattr(c, "storage") + def test_del(self): + space = self.space + a, b, c, d = space.unpackiterable(space.appexec([], """(): + class A(object): + pass + class B(object): + def __del__(self): + pass + class F(file): + pass + class G(file): + def __del__(self): + pass + return A(), B(), F("xyz", "w"), G("ghi", "w") + """)) + assert type(b).__base__ is type(a) + assert hasattr(c, "__del__") + assert type(d) is type(c) + class AppTestTypeDef: def setup_class(cls): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -113,11 +113,18 @@ return _subclass_cache[key] except KeyError: # XXX can save a class if cls already has a __del__ - if needsdel: + keys = [key] + base_has_del = hasattr(cls, '__del__') + if base_has_del: + # if the base has a __del__, we only need one class + keys = [(space, cls, True), (space, cls, False)] + needsdel = True + elif needsdel: cls = get_unique_interplevel_subclass(space, cls, False) subcls = _getusercls(space, cls, needsdel) assert key not in _subclass_cache - _subclass_cache[key] = subcls + for key in keys: + _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} @@ -133,20 +140,24 @@ name = cls.__name__ + "User" mixins_needed = [] - if cls is W_ObjectObject or cls is W_InstanceObject: - mixins_needed.append(_make_storage_mixin_size_n()) - else: - mixins_needed.append(MapdictStorageMixin) - copy_methods = [BaseUserClassMapdict] - if reallywantdict or not typedef.hasdict: - # the type has no dict, mapdict to provide the dict - copy_methods.append(MapdictDictSupport) - name += "Dict" - if not typedef.weakrefable: - # the type does not support weakrefs yet, mapdict to provide weakref - # 
support - copy_methods.append(MapdictWeakrefSupport) - name += "Weakrefable" + copy_methods = [] + mixins_needed = [] + name = cls.__name__ + if not cls.user_overridden_class: + if cls is W_ObjectObject or cls is W_InstanceObject: + mixins_needed.append(_make_storage_mixin_size_n()) + else: + mixins_needed.append(MapdictStorageMixin) + copy_methods = [BaseUserClassMapdict] + if reallywantdict or not typedef.hasdict: + # the type has no dict, mapdict to provide the dict + copy_methods.append(MapdictDictSupport) + name += "Dict" + if not typedef.weakrefable: + # the type does not support weakrefs yet, mapdict to provide weakref + # support + copy_methods.append(MapdictWeakrefSupport) + name += "Weakrefable" if wants_del: name += "Del" parent_destructor = getattr(cls, '__del__', None) From pypy.commits at gmail.com Fri Apr 29 04:18:48 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 29 Apr 2016 01:18:48 -0700 (PDT) Subject: [pypy-commit] pypy default: be more consistent in naming Message-ID: <572318e8.0c371c0a.907eb.ffffa216@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r84022:2ac5162cb4ec Date: 2016-04-28 22:26 +0300 http://bitbucket.org/pypy/pypy/changeset/2ac5162cb4ec/ Log: be more consistent in naming diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -277,7 +277,7 @@ def copy(self, obj): result = Object() result.space = self.space - result._init_empty(self) + result._mapdict_init_empty(self) return result def length(self): @@ -286,7 +286,7 @@ def set_terminator(self, obj, terminator): result = Object() result.space = self.space - result._init_empty(terminator) + result._mapdict_init_empty(terminator) return result def remove_dict_entries(self, obj): @@ -304,7 +304,7 @@ def materialize_r_dict(self, space, obj, dict_w): result = Object() result.space = space - result._init_empty(self.devolved_dict_terminator) + 
result._mapdict_init_empty(self.devolved_dict_terminator) return result @@ -452,7 +452,7 @@ # everything that's needed to use mapdict for a user subclass at all. # This immediately makes slots possible. - # assumes presence of _init_empty, _mapdict_read_storage, + # assumes presence of _mapdict_init_empty, _mapdict_read_storage, # _mapdict_write_storage, _mapdict_storage_length, # _set_mapdict_storage_and_map @@ -482,7 +482,7 @@ assert (not self.typedef.hasdict or isinstance(w_subtype.terminator, NoDictTerminator) or self.typedef is W_InstanceObject.typedef) - self._init_empty(w_subtype.terminator) + self._mapdict_init_empty(w_subtype.terminator) # methods needed for slots @@ -591,7 +591,7 @@ assert flag class MapdictStorageMixin(object): - def _init_empty(self, map): + def _mapdict_init_empty(self, map): from rpython.rlib.debug import make_sure_not_resized self.map = map self.storage = make_sure_not_resized([None] * map.size_estimate()) @@ -635,7 +635,7 @@ rangenmin1 = unroll.unrolling_iterable(range(nmin1)) valnmin1 = "_value%s" % nmin1 class subcls(object): - def _init_empty(self, map): + def _mapdict_init_empty(self, map): for i in rangenmin1: setattr(self, "_value%s" % i, None) setattr(self, valnmin1, erase_item(None)) @@ -723,7 +723,7 @@ def get_empty_storage(self): w_result = Object() terminator = self.space.fromcache(get_terminator_for_dicts) - w_result._init_empty(terminator) + w_result._mapdict_init_empty(terminator) return self.erase(w_result) def switch_to_object_strategy(self, w_dict): From pypy.commits at gmail.com Fri Apr 29 04:18:50 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 29 Apr 2016 01:18:50 -0700 (PDT) Subject: [pypy-commit] pypy share-mapdict-methods-2: new attempt: try to share methods by simply sticking the same function objects Message-ID: <572318ea.45bd1c0a.7f058.ffffa3fa@mx.google.com> Author: Carl Friedrich Bolz Branch: share-mapdict-methods-2 Changeset: r84023:e42550cbef26 Date: 2016-04-28 23:02 +0300 
http://bitbucket.org/pypy/pypy/changeset/e42550cbef26/ Log: new attempt: try to share methods by simply sticking the same function objects into a number of classes (thanks Armin for the idea) diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -103,26 +103,26 @@ # we need two subclasses of the app-level type, one to add mapdict, and then one # to add del to not slow down the GC. -def get_unique_interplevel_subclass(config, cls, needsdel=False): +def get_unique_interplevel_subclass(space, cls, needsdel=False): "NOT_RPYTHON: initialization-time only" if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False): needsdel = False assert cls.typedef.acceptable_as_base_class - key = config, cls, needsdel + key = space, cls, needsdel try: return _subclass_cache[key] except KeyError: # XXX can save a class if cls already has a __del__ if needsdel: - cls = get_unique_interplevel_subclass(config, cls, False) - subcls = _getusercls(config, cls, needsdel) + cls = get_unique_interplevel_subclass(space, cls, False) + subcls = _getusercls(space, cls, needsdel) assert key not in _subclass_cache _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def _getusercls(config, cls, wants_del, reallywantdict=False): +def _getusercls(space, cls, wants_del, reallywantdict=False): from rpython.rlib import objectmodel from pypy.objspace.std.mapdict import (BaseUserClassMapdict, MapdictDictSupport, MapdictWeakrefSupport, @@ -130,15 +130,16 @@ typedef = cls.typedef name = cls.__name__ + "User" - mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()] + mixins_needed = [_make_storage_mixin_size_n()] + copy_methods = [BaseUserClassMapdict] if reallywantdict or not typedef.hasdict: # the type has no dict, mapdict to provide the dict - mixins_needed.append(MapdictDictSupport) + 
copy_methods.append(MapdictDictSupport) name += "Dict" if not typedef.weakrefable: # the type does not support weakrefs yet, mapdict to provide weakref # support - mixins_needed.append(MapdictWeakrefSupport) + copy_methods.append(MapdictWeakrefSupport) name += "Weakrefable" if wants_del: name += "Del" @@ -148,14 +149,14 @@ parent_destructor(self) def call_applevel_del(self): assert isinstance(self, subcls) - self.space.userdel(self) + space.userdel(self) class Proto(object): def __del__(self): self.clear_all_weakrefs() - self.enqueue_for_destruction(self.space, call_applevel_del, + self.enqueue_for_destruction(space, call_applevel_del, 'method __del__ of ') if parent_destructor is not None: - self.enqueue_for_destruction(self.space, call_parent_del, + self.enqueue_for_destruction(space, call_parent_del, 'internal destructor of ') mixins_needed.append(Proto) @@ -163,10 +164,17 @@ user_overridden_class = True for base in mixins_needed: objectmodel.import_from_mixin(base) + for copycls in copy_methods: + _copy_methods(copycls, subcls) del subcls.base subcls.__name__ = name return subcls +def _copy_methods(copycls, subcls): + for key, value in copycls.__dict__.items(): + if (not key.startswith('__') or key == '__del__'): + setattr(subcls, key, value) + # ____________________________________________________________ diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -195,9 +195,9 @@ return self.cls_without_del = _getusercls( - space.config, W_InstanceObject, False, reallywantdict=True) + space, W_InstanceObject, False, reallywantdict=True) self.cls_with_del = _getusercls( - space.config, W_InstanceObject, True, reallywantdict=True) + space, W_InstanceObject, True, reallywantdict=True) def class_descr_call(space, w_self, __args__): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- 
a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -452,19 +452,12 @@ # everything that's needed to use mapdict for a user subclass at all. # This immediately makes slots possible. - # assumes presence of _mapdict_init_empty, _mapdict_read_storage, + # assumes presence of _get_mapdict_map, _set_mapdict_map + # _mapdict_init_empty, _mapdict_read_storage, # _mapdict_write_storage, _mapdict_storage_length, # _set_mapdict_storage_and_map # _____________________________________________ - # methods needed for mapdict - - def _get_mapdict_map(self): - return jit.promote(self.map) - def _set_mapdict_map(self, map): - self.map = map - - # _____________________________________________ # objspace interface # class access @@ -478,7 +471,6 @@ def user_setup(self, space, w_subtype): from pypy.module.__builtin__.interp_classobj import W_InstanceObject - self.space = space assert (not self.typedef.hasdict or isinstance(w_subtype.terminator, NoDictTerminator) or self.typedef is W_InstanceObject.typedef) @@ -591,6 +583,11 @@ assert flag class MapdictStorageMixin(object): + def _get_mapdict_map(self): + return jit.promote(self.map) + def _set_mapdict_map(self, map): + self.map = map + def _mapdict_init_empty(self, map): from rpython.rlib.debug import make_sure_not_resized self.map = map @@ -605,6 +602,7 @@ def _mapdict_storage_length(self): return len(self.storage) + def _set_mapdict_storage_and_map(self, storage, map): self.storage = storage self.map = map @@ -635,6 +633,10 @@ rangenmin1 = unroll.unrolling_iterable(range(nmin1)) valnmin1 = "_value%s" % nmin1 class subcls(object): + def _get_mapdict_map(self): + return jit.promote(self.map) + def _set_mapdict_map(self, map): + self.map = map def _mapdict_init_empty(self, map): for i in rangenmin1: setattr(self, "_value%s" % i, None) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -358,7 +358,7 @@ cls = 
cls.typedef.applevel_subclasses_base # subcls = get_unique_interplevel_subclass( - self.config, cls, w_subtype.needsdel) + self, cls, w_subtype.needsdel) instance = instantiate(subcls) assert isinstance(instance, cls) instance.user_setup(self, w_subtype) From pypy.commits at gmail.com Fri Apr 29 04:18:55 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 29 Apr 2016 01:18:55 -0700 (PDT) Subject: [pypy-commit] pypy default: merge default Message-ID: <572318ef.8344c20a.c3eed.ffff8879@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r84026:616e8202f53f Date: 2016-04-29 11:17 +0300 http://bitbucket.org/pypy/pypy/changeset/616e8202f53f/ Log: merge default diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -277,7 +277,7 @@ def copy(self, obj): result = Object() result.space = self.space - result._init_empty(self) + result._mapdict_init_empty(self) return result def length(self): @@ -286,7 +286,7 @@ def set_terminator(self, obj, terminator): result = Object() result.space = self.space - result._init_empty(terminator) + result._mapdict_init_empty(terminator) return result def remove_dict_entries(self, obj): @@ -304,7 +304,7 @@ def materialize_r_dict(self, space, obj, dict_w): result = Object() result.space = space - result._init_empty(self.devolved_dict_terminator) + result._mapdict_init_empty(self.devolved_dict_terminator) return result @@ -417,11 +417,6 @@ def __repr__(self): return "" % (self.name, self.index, self.storageindex, self.back) -def _become(w_obj, new_obj): - # this is like the _become method, really, but we cannot use that due to - # RPython reasons - w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) - class MapAttrCache(object): def __init__(self, space): SIZE = 1 << space.config.objspace.std.methodcachesizeexp @@ -457,16 +452,13 @@ # everything that's needed to use mapdict for a user subclass at all. 
# This immediately makes slots possible. - # assumes presence of _init_empty, _mapdict_read_storage, + # assumes presence of _mapdict_init_empty, _mapdict_read_storage, # _mapdict_write_storage, _mapdict_storage_length, # _set_mapdict_storage_and_map # _____________________________________________ # methods needed for mapdict - def _become(self, new_obj): - self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) - def _get_mapdict_map(self): return jit.promote(self.map) def _set_mapdict_map(self, map): @@ -482,7 +474,7 @@ def setclass(self, space, w_cls): new_obj = self._get_mapdict_map().set_terminator(self, w_cls.terminator) - self._become(new_obj) + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) def user_setup(self, space, w_subtype): from pypy.module.__builtin__.interp_classobj import W_InstanceObject @@ -490,7 +482,7 @@ assert (not self.typedef.hasdict or isinstance(w_subtype.terminator, NoDictTerminator) or self.typedef is W_InstanceObject.typedef) - self._init_empty(w_subtype.terminator) + self._mapdict_init_empty(w_subtype.terminator) # methods needed for slots @@ -508,7 +500,7 @@ new_obj = self._get_mapdict_map().delete(self, "slot", index) if new_obj is None: return False - self._become(new_obj) + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) return True @@ -549,7 +541,7 @@ new_obj = self._get_mapdict_map().delete(self, attrname, DICT) if new_obj is None: return False - self._become(new_obj) + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) return True def getdict(self, space): @@ -599,7 +591,7 @@ assert flag class MapdictStorageMixin(object): - def _init_empty(self, map): + def _mapdict_init_empty(self, map): from rpython.rlib.debug import make_sure_not_resized self.map = map self.storage = make_sure_not_resized([None] * map.size_estimate()) @@ -643,7 +635,7 @@ rangenmin1 = unroll.unrolling_iterable(range(nmin1)) valnmin1 = "_value%s" % nmin1 class subcls(object): - def _init_empty(self, map): + 
def _mapdict_init_empty(self, map): for i in rangenmin1: setattr(self, "_value%s" % i, None) setattr(self, valnmin1, erase_item(None)) @@ -731,7 +723,7 @@ def get_empty_storage(self): w_result = Object() terminator = self.space.fromcache(get_terminator_for_dicts) - w_result._init_empty(terminator) + w_result._mapdict_init_empty(terminator) return self.erase(w_result) def switch_to_object_strategy(self, w_dict): @@ -811,7 +803,7 @@ def clear(self, w_dict): w_obj = self.unerase(w_dict.dstorage) new_obj = w_obj._get_mapdict_map().remove_dict_entries(w_obj) - _become(w_obj, new_obj) + w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) def popitem(self, w_dict): curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT) @@ -836,7 +828,7 @@ def materialize_r_dict(space, obj, dict_w): map = obj._get_mapdict_map() new_obj = map.materialize_r_dict(space, obj, dict_w) - _become(obj, new_obj) + obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) class MapDictIteratorKeys(BaseKeyIterator): def __init__(self, space, strategy, dictimplementation): From pypy.commits at gmail.com Fri Apr 29 04:20:27 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 29 Apr 2016 01:20:27 -0700 (PDT) Subject: [pypy-commit] pypy share-mapdict-methods: close abandoned branch Message-ID: <5723194b.ce9d1c0a.cf851.ffff9edb@mx.google.com> Author: Carl Friedrich Bolz Branch: share-mapdict-methods Changeset: r84027:fa02ea37ffeb Date: 2016-04-29 11:19 +0300 http://bitbucket.org/pypy/pypy/changeset/fa02ea37ffeb/ Log: close abandoned branch From pypy.commits at gmail.com Fri Apr 29 09:53:01 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 29 Apr 2016 06:53:01 -0700 (PDT) Subject: [pypy-commit] pypy share-mapdict-methods: tighten test Message-ID: <5723673d.0f801c0a.d39d8.3284@mx.google.com> Author: Carl Friedrich Bolz Branch: share-mapdict-methods Changeset: r84028:8ff1bd873430 Date: 2016-04-29 12:08 +0300 http://bitbucket.org/pypy/pypy/changeset/8ff1bd873430/ 
Log: tighten test diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -135,7 +135,7 @@ prevsubcls = subclasses[cls].setdefault(subcls.__name__, subcls) assert subcls is prevsubcls for cls, set in subclasses.items(): - assert len(set) <= 6, "%s has %d subclasses:\n%r" % ( + assert len(set) <= 2, "%s has %d subclasses:\n%r" % ( cls, len(set), list(set)) def test_getsetproperty(self): From pypy.commits at gmail.com Fri Apr 29 09:53:03 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 29 Apr 2016 06:53:03 -0700 (PDT) Subject: [pypy-commit] pypy default: merge share-mapdict-methods-2 Message-ID: <5723673f.6a70c20a.ed3cf.1579@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r84029:0b1c73820000 Date: 2016-04-29 16:52 +0300 http://bitbucket.org/pypy/pypy/changeset/0b1c73820000/ Log: merge share-mapdict-methods-2 reduce generated code for subclasses by using the same function objects in all generated subclasses. 
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -362,6 +362,45 @@ """) assert seen == [1] + def test_mapdict_number_of_slots(self): + space = self.space + a, b, c = space.unpackiterable(space.appexec([], """(): + class A(object): + pass + a = A() + a.x = 1 + class B: + pass + b = B() + b.x = 1 + class C(int): + pass + c = C(1) + c.x = 1 + return a, b, c + """), 3) + assert not hasattr(a, "storage") + assert not hasattr(b, "storage") + assert hasattr(c, "storage") + + def test_del(self): + space = self.space + a, b, c, d = space.unpackiterable(space.appexec([], """(): + class A(object): + pass + class B(object): + def __del__(self): + pass + class F(file): + pass + class G(file): + def __del__(self): + pass + return A(), B(), F("xyz", "w"), G("ghi", "w") + """)) + assert type(b).__base__ is type(a) + assert hasattr(c, "__del__") + assert type(d) is type(c) class AppTestTypeDef: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -103,43 +103,61 @@ # we need two subclasses of the app-level type, one to add mapdict, and then one # to add del to not slow down the GC. 
-def get_unique_interplevel_subclass(config, cls, needsdel=False): +def get_unique_interplevel_subclass(space, cls, needsdel=False): "NOT_RPYTHON: initialization-time only" if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False): needsdel = False assert cls.typedef.acceptable_as_base_class - key = config, cls, needsdel + key = space, cls, needsdel try: return _subclass_cache[key] except KeyError: # XXX can save a class if cls already has a __del__ - if needsdel: - cls = get_unique_interplevel_subclass(config, cls, False) - subcls = _getusercls(config, cls, needsdel) + keys = [key] + base_has_del = hasattr(cls, '__del__') + if base_has_del: + # if the base has a __del__, we only need one class + keys = [(space, cls, True), (space, cls, False)] + needsdel = True + elif needsdel: + cls = get_unique_interplevel_subclass(space, cls, False) + subcls = _getusercls(space, cls, needsdel) assert key not in _subclass_cache - _subclass_cache[key] = subcls + for key in keys: + _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def _getusercls(config, cls, wants_del, reallywantdict=False): +def _getusercls(space, cls, wants_del, reallywantdict=False): from rpython.rlib import objectmodel + from pypy.objspace.std.objectobject import W_ObjectObject + from pypy.module.__builtin__.interp_classobj import W_InstanceObject from pypy.objspace.std.mapdict import (BaseUserClassMapdict, MapdictDictSupport, MapdictWeakrefSupport, - _make_storage_mixin_size_n) + _make_storage_mixin_size_n, MapdictStorageMixin) typedef = cls.typedef name = cls.__name__ + "User" - mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()] - if reallywantdict or not typedef.hasdict: - # the type has no dict, mapdict to provide the dict - mixins_needed.append(MapdictDictSupport) - name += "Dict" - if not typedef.weakrefable: - # the type does not support weakrefs yet, mapdict to provide weakref - # 
support - mixins_needed.append(MapdictWeakrefSupport) - name += "Weakrefable" + mixins_needed = [] + copy_methods = [] + mixins_needed = [] + name = cls.__name__ + if not cls.user_overridden_class: + if cls is W_ObjectObject or cls is W_InstanceObject: + mixins_needed.append(_make_storage_mixin_size_n()) + else: + mixins_needed.append(MapdictStorageMixin) + copy_methods = [BaseUserClassMapdict] + if reallywantdict or not typedef.hasdict: + # the type has no dict, mapdict to provide the dict + copy_methods.append(MapdictDictSupport) + name += "Dict" + if not typedef.weakrefable: + # the type does not support weakrefs yet, mapdict to provide weakref + # support + copy_methods.append(MapdictWeakrefSupport) + name += "Weakrefable" if wants_del: name += "Del" parent_destructor = getattr(cls, '__del__', None) @@ -148,14 +166,14 @@ parent_destructor(self) def call_applevel_del(self): assert isinstance(self, subcls) - self.space.userdel(self) + space.userdel(self) class Proto(object): def __del__(self): self.clear_all_weakrefs() - self.enqueue_for_destruction(self.space, call_applevel_del, + self.enqueue_for_destruction(space, call_applevel_del, 'method __del__ of ') if parent_destructor is not None: - self.enqueue_for_destruction(self.space, call_parent_del, + self.enqueue_for_destruction(space, call_parent_del, 'internal destructor of ') mixins_needed.append(Proto) @@ -163,10 +181,17 @@ user_overridden_class = True for base in mixins_needed: objectmodel.import_from_mixin(base) + for copycls in copy_methods: + _copy_methods(copycls, subcls) del subcls.base subcls.__name__ = name return subcls +def _copy_methods(copycls, subcls): + for key, value in copycls.__dict__.items(): + if (not key.startswith('__') or key == '__del__'): + setattr(subcls, key, value) + # ____________________________________________________________ diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ 
b/pypy/module/__builtin__/interp_classobj.py @@ -195,9 +195,9 @@ return self.cls_without_del = _getusercls( - space.config, W_InstanceObject, False, reallywantdict=True) + space, W_InstanceObject, False, reallywantdict=True) self.cls_with_del = _getusercls( - space.config, W_InstanceObject, True, reallywantdict=True) + space, W_InstanceObject, True, reallywantdict=True) def class_descr_call(space, w_self, __args__): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -452,19 +452,12 @@ # everything that's needed to use mapdict for a user subclass at all. # This immediately makes slots possible. - # assumes presence of _mapdict_init_empty, _mapdict_read_storage, + # assumes presence of _get_mapdict_map, _set_mapdict_map + # _mapdict_init_empty, _mapdict_read_storage, # _mapdict_write_storage, _mapdict_storage_length, # _set_mapdict_storage_and_map # _____________________________________________ - # methods needed for mapdict - - def _get_mapdict_map(self): - return jit.promote(self.map) - def _set_mapdict_map(self, map): - self.map = map - - # _____________________________________________ # objspace interface # class access @@ -478,7 +471,6 @@ def user_setup(self, space, w_subtype): from pypy.module.__builtin__.interp_classobj import W_InstanceObject - self.space = space assert (not self.typedef.hasdict or isinstance(w_subtype.terminator, NoDictTerminator) or self.typedef is W_InstanceObject.typedef) @@ -591,6 +583,11 @@ assert flag class MapdictStorageMixin(object): + def _get_mapdict_map(self): + return jit.promote(self.map) + def _set_mapdict_map(self, map): + self.map = map + def _mapdict_init_empty(self, map): from rpython.rlib.debug import make_sure_not_resized self.map = map @@ -605,6 +602,7 @@ def _mapdict_storage_length(self): return len(self.storage) + def _set_mapdict_storage_and_map(self, storage, map): self.storage = storage self.map = map @@ -635,6 +633,10 @@ 
rangenmin1 = unroll.unrolling_iterable(range(nmin1)) valnmin1 = "_value%s" % nmin1 class subcls(object): + def _get_mapdict_map(self): + return jit.promote(self.map) + def _set_mapdict_map(self, map): + self.map = map def _mapdict_init_empty(self, map): for i in rangenmin1: setattr(self, "_value%s" % i, None) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -358,7 +358,7 @@ cls = cls.typedef.applevel_subclasses_base # subcls = get_unique_interplevel_subclass( - self.config, cls, w_subtype.needsdel) + self, cls, w_subtype.needsdel) instance = instantiate(subcls) assert isinstance(instance, cls) instance.user_setup(self, w_subtype) From pypy.commits at gmail.com Fri Apr 29 11:22:26 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 29 Apr 2016 08:22:26 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: Rephrase Message-ID: <57237c32.6322c20a.0c9b.37fe@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r740:d8f9d0dfa415 Date: 2016-04-29 17:23 +0200 http://bitbucket.org/pypy/pypy.org/changeset/d8f9d0dfa415/ Log: Rephrase diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -85,8 +85,9 @@
  • PowerPC 64bit both little and big endian
  • System Z (s390x)
  • -

    PyPy's x86 version runs on several operating systems such on Linux 32/64, Mac OS X 64, Windows, OpenBSD, freebsd. -All others are only supported on Linux.

    +

    PyPy's x86 version runs on several operating systems, such as Linux +(32/64 bits), Mac OS X (64 bits), Windows (32 bits), OpenBSD, FreeBSD. +All non-x86 versions are only supported on Linux.

    If you are interested in helping to move forward, see our howtohelp page.

    We also have a beta release of PyPy3 which implements Python 3.2.5. It runs on the same platforms as above.

    diff --git a/source/features.txt b/source/features.txt --- a/source/features.txt +++ b/source/features.txt @@ -24,8 +24,9 @@ * `PowerPC`_ 64bit both little and big endian * `System Z (s390x)`_ -PyPy's x86 version runs on several operating systems such on Linux 32/64, Mac OS X 64, Windows, OpenBSD, freebsd. -All others are only supported on Linux. +PyPy's x86 version runs on several operating systems, such as Linux +(32/64 bits), Mac OS X (64 bits), Windows (32 bits), OpenBSD, FreeBSD. +All non-x86 versions are only supported on Linux. If you are interested in helping to move forward, see our `howtohelp`_ page. From pypy.commits at gmail.com Fri Apr 29 11:24:29 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 29 Apr 2016 08:24:29 -0700 (PDT) Subject: [pypy-commit] pypy default: Detail Message-ID: <57237cad.26b0c20a.cb528.49a7@mx.google.com> Author: Armin Rigo Branch: Changeset: r84030:cbcb5db910b7 Date: 2016-04-29 17:24 +0200 http://bitbucket.org/pypy/pypy/changeset/cbcb5db910b7/ Log: Detail diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -120,7 +120,7 @@ PyPy currently supports: * **x86** machines on most common operating systems - (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD), * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, From pypy.commits at gmail.com Fri Apr 29 11:34:32 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 29 Apr 2016 08:34:32 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: Fix some tests Message-ID: <57237f08.08851c0a.27447.5d81@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r84031:3a1f7e742d24 Date: 2016-04-29 16:33 +0100 http://bitbucket.org/pypy/pypy/changeset/3a1f7e742d24/ Log: Fix some tests diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ 
-387,9 +387,9 @@ freshly raised. This function steals the references of the arguments. To clear the exception state, pass *NULL* for all three arguments. For general rules about the three arguments, see :c:func:`PyErr_Restore`. - + .. note:: - + This function is not normally used by code that wants to handle exceptions. Rather, it can be used when code needs to save and restore the exception state temporarily. Use diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -700,7 +700,7 @@ UnicodeSubtype3.tp_bases = Py_BuildValue("(OO)", &UnicodeSubtype, &CustomType); if (PyType_Ready(&UnicodeSubtype3) < 0) - return; + return NULL; m = PyModule_Create(&moduledef); if (m == NULL) diff --git a/pypy/module/cpyext/test/foo3.c b/pypy/module/cpyext/test/foo3.c --- a/pypy/module/cpyext/test/foo3.c +++ b/pypy/module/cpyext/test/foo3.c @@ -62,23 +62,36 @@ static PyMethodDef sbkMethods[] = {{NULL, NULL, 0, NULL}}; +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "foo", + "Module Doc", + -1, + &sbkMethods, + NULL, + NULL, + NULL, + NULL, +}; + /* Initialize this module. 
*/ #ifdef __GNUC__ extern __attribute__((visibility("default"))) #endif PyMODINIT_FUNC -initfoo3(void) +PyInit_foo3(void) { PyObject *mod, *d; footype.tp_base = &PyType_Type; PyType_Ready(&footype); - mod = Py_InitModule("foo3", sbkMethods); + mod = PyModule_Create(&moduledef); if (mod == NULL) - return; + return NULL; d = PyModule_GetDict(mod); if (d == NULL) - return; + return NULL; if (PyDict_SetItemString(d, "footype", (PyObject *)&footype) < 0) - return; + return NULL; + return mod; } diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -401,6 +401,8 @@ init = """PyObject *mod = PyModule_Create(&moduledef);""" if more_init: init += more_init + else: + init += "\nreturn mod;" return import_module(space, name=modname, init=init, body=body, w_include_dirs=w_include_dirs, PY_SSIZE_T_CLEAN=PY_SSIZE_T_CLEAN) diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -123,7 +123,6 @@ return result; ''') assert b'foo\0bar\0baz' == pybuffer(b'foo\0bar\0baz') - assert 'foo\0bar\0baz' == pybuffer(bytearray('foo\0bar\0baz')) def test_pyarg_parse_string_fails(self): diff --git a/pypy/module/cpyext/test/test_iterator.py b/pypy/module/cpyext/test/test_iterator.py --- a/pypy/module/cpyext/test/test_iterator.py +++ b/pypy/module/cpyext/test/test_iterator.py @@ -40,7 +40,7 @@ ), ("check", "METH_O", ''' - return PyInt_FromLong( + return PyLong_FromLong( PySequence_Check(args) + PyMapping_Check(args) * 2); ''') @@ -49,7 +49,7 @@ static PyObject * mp_subscript(PyObject *self, PyObject *key) { - return PyInt_FromLong(42); + return PyLong_FromLong(42); } static Py_ssize_t mp_length(PyObject *self) @@ -69,10 +69,6 @@ e = raises(TypeError, iter, obj) assert str(e.value).endswith("object is not iterable") # - import operator - 
assert not operator.isSequenceType(obj) - assert operator.isMappingType(obj) - # assert module.check(obj) == 2 def test_iterable_nonmapping_object(self): @@ -90,7 +86,7 @@ '''), ("check", "METH_O", ''' - return PyInt_FromLong( + return PyLong_FromLong( PySequence_Check(args) + PyMapping_Check(args) * 2); ''') @@ -99,7 +95,7 @@ static PyObject * sq_item(PyObject *self, Py_ssize_t size) { - return PyInt_FromLong(42); + return PyLong_FromLong(42); } static Py_ssize_t sq_length(PyObject *self) @@ -117,11 +113,7 @@ assert len(obj) == 2 assert not hasattr(obj, "__iter__") it = iter(obj) - assert it.next() == 42 - assert it.next() == 42 - # - import operator - assert operator.isSequenceType(obj) - assert not operator.isMappingType(obj) + assert next(it) == 42 + assert next(it) == 42 # assert module.check(obj) == 1 diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -151,7 +151,7 @@ obj2 = module.UnicodeSubtype2() obj3 = module.UnicodeSubtype3() assert obj3.get_val() == 42 - assert len(type(obj3).mro()) == 6 + assert len(type(obj3).mro()) == 5 def test_init(self): module = self.import_module(name="foo") @@ -269,7 +269,7 @@ def test_tp_dict(self): foo = self.import_module("foo") module = self.import_extension('test', [ - ("read_tp_dict", "METH_O", + ("read_tp_dict", "METH_O", ''' PyObject *method; if (!args->ob_type->tp_dict) @@ -281,9 +281,7 @@ args->ob_type->tp_dict, "copy"); Py_INCREF(method); return method; - ''' - ) - ]) + ''')]) obj = foo.new() assert module.read_tp_dict(obj) == foo.fooType.copy @@ -377,7 +375,7 @@ assert api.PyErr_Occurred() is None def test_ndarray_ref(self, space, api): - py.test.py3k_skip('Numpy not yet supported on py3k') + pytest.py3k_skip('Numpy not yet supported on py3k') w_obj = space.appexec([], """(): import _numpypy return _numpypy.multiarray.dtype('int64').type(2)""") @@ -536,21 +534,22 @@ 
module = self.import_extension('foo', [ ("tp_call", "METH_VARARGS", ''' - PyObject *obj = PyTuple_GET_ITEM(args, 0); - PyObject *c_args = PyTuple_GET_ITEM(args, 1); - if (!obj->ob_type->tp_call) - { - PyErr_SetNone(PyExc_ValueError); - return NULL; - } - return obj->ob_type->tp_call(obj, c_args, NULL); - ''' - ) - ]) + PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0); + PyObject *obj = PyTuple_GET_ITEM(args, 1); + PyObject *c_args = PyTuple_GET_ITEM(args, 2); + if (!type->tp_call) + { + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + return type->tp_call(obj, c_args, NULL); + ''')]) + class C: def __call__(self, *args): return args assert module.tp_call(type(C()), C(), ('x', 2)) == ('x', 2) + class D(type): def __call__(self, *args): return "foo! %r" % (args,) From pypy.commits at gmail.com Fri Apr 29 11:59:41 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 29 Apr 2016 08:59:41 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: 2to3fy two tests Message-ID: <572384ed.4e981c0a.29b80.6655@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r84032:641f1954ee8b Date: 2016-04-29 16:58 +0100 http://bitbucket.org/pypy/pypy/changeset/641f1954ee8b/ Log: 2to3fy two tests diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -202,15 +202,15 @@ module = self.import_extension('foo', [ ("malloctest", "METH_NOARGS", """ - PyObject *obj = PyObject_MALLOC(sizeof(PyIntObject)); - obj = PyObject_Init(obj, &PyInt_Type); + PyObject *obj = PyObject_MALLOC(sizeof(PyFloatObject)); + obj = PyObject_Init(obj, &PyFloat_Type); if (obj != NULL) - ((PyIntObject *)obj)->ob_ival = -424344; + ((PyFloatObject *)obj)->ob_fval = -12.34; return obj; """)]) x = module.malloctest() - assert type(x) is int - assert x == -424344 + assert type(x) is float + assert x == -12.34 @pytest.mark.skipif(True, reason='realloc not fully 
implemented') def test_object_realloc(self): diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -44,10 +44,7 @@ '''), ]) module.double_ensure(0) - print '0 ok' module.double_ensure(1) - print '1 ok' - def test_thread_state_get(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Fri Apr 29 13:27:34 2016 From: pypy.commits at gmail.com (devin.jeanpierre) Date: Fri, 29 Apr 2016 10:27:34 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-werror: Compile with -Werror in cpyext tests. Message-ID: <57239986.171d1c0a.88858.ffff880d@mx.google.com> Author: Devin Jeanpierre Branch: cpyext-werror Changeset: r84033:2b6f392b8626 Date: 2016-04-29 10:25 -0700 http://bitbucket.org/pypy/pypy/changeset/2b6f392b8626/ Log: Compile with -Werror in cpyext tests. Callers may build with -Werror and should not succeed with CPython but fail with cpyext. This forces us to have the same API -- in particular, to use the same pointer types and perform the same casts. See, for example, the change to the API in ndarrayobject.py. (This is an hg backout of changeset db306c552216 from when I accidentally pushed this to pypy without code review... hee hee. :C) diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -239,9 +239,7 @@ gufunctype = lltype.Ptr(ufuncs.GenericUfunc) -# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, is there -# a problem with casting function pointers? 
- at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, + at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, rffi.CCHARP], PyObject, header=HEADER) def PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, @@ -249,14 +247,18 @@ w_signature = rffi.charp2str(signature) return do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return, w_signature) - + def do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return, w_signature): funcs_w = [None] * ntypes dtypes_w = [None] * ntypes * (nin + nout) + # XXX For some reason funcs[i] segfaults, but this does not: + # cast(gufunctype, cast(CArrayPtr(CArrayPtr(gufunctype)), funcs)[i]) + # Something is very wrong here. + funcs_wrong_type = rffi.cast(rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), funcs) for i in range(ntypes): - funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]), data) + funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs_wrong_type[i]), data) for i in range(ntypes*(nin+nout)): dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] w_funcs = space.newlist(funcs_w) @@ -268,7 +270,7 @@ w_signature, w_identity, w_name, w_doc, stack_inputs=True) return ufunc_generic - at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, + at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t], PyObject, header=HEADER) def PyUFunc_FromFuncAndData(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return): diff --git a/pypy/module/cpyext/test/test_borrow.py b/pypy/module/cpyext/test/test_borrow.py --- a/pypy/module/cpyext/test/test_borrow.py +++ 
b/pypy/module/cpyext/test/test_borrow.py @@ -12,13 +12,13 @@ PyObject *t = PyTuple_New(1); PyObject *f = PyFloat_FromDouble(42.0); PyObject *g = NULL; - printf("Refcnt1: %i\\n", f->ob_refcnt); + printf("Refcnt1: %zd\\n", f->ob_refcnt); PyTuple_SetItem(t, 0, f); // steals reference - printf("Refcnt2: %i\\n", f->ob_refcnt); + printf("Refcnt2: %zd\\n", f->ob_refcnt); f = PyTuple_GetItem(t, 0); // borrows reference - printf("Refcnt3: %i\\n", f->ob_refcnt); + printf("Refcnt3: %zd\\n", f->ob_refcnt); g = PyTuple_GetItem(t, 0); // borrows reference again - printf("Refcnt4: %i\\n", f->ob_refcnt); + printf("Refcnt4: %zd\\n", f->ob_refcnt); printf("COMPARE: %i\\n", f == g); fflush(stdout); Py_DECREF(t); diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -161,7 +161,10 @@ module = self.import_extension('foo', [ ("string_None", "METH_VARARGS", ''' - return PyString_AsString(Py_None); + if (PyString_AsString(Py_None)) { + Py_RETURN_NONE; + } + return NULL; ''' )]) raises(TypeError, module.string_None) diff --git a/pypy/module/cpyext/test/test_classobject.py b/pypy/module/cpyext/test/test_classobject.py --- a/pypy/module/cpyext/test/test_classobject.py +++ b/pypy/module/cpyext/test/test_classobject.py @@ -29,7 +29,6 @@ assert space.unwrap(space.getattr(w_instance, space.wrap('x'))) == 1 assert space.unwrap(space.getattr(w_instance, space.wrap('y'))) == 2 assert space.unwrap(space.getattr(w_instance, space.wrap('args'))) == (3,) - def test_lookup(self, space, api): w_instance = space.appexec([], """(): @@ -68,7 +67,7 @@ ("get_classtype", "METH_NOARGS", """ Py_INCREF(&PyClass_Type); - return &PyClass_Type; + return (PyObject*)&PyClass_Type; """)]) class C: pass assert module.get_classtype() is type(C) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- 
a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -72,8 +72,7 @@ else: kwds["link_files"] = [str(api_library + '.so')] if sys.platform.startswith('linux'): - kwds["compile_extra"]=["-Werror=implicit-function-declaration", - "-g", "-O0"] + kwds["compile_extra"]=["-Werror", "-g", "-O0"] kwds["link_extra"]=["-g"] modname = modname.split('.')[-1] @@ -747,7 +746,7 @@ refcnt_after = true_obj->ob_refcnt; Py_DECREF(true_obj); Py_DECREF(true_obj); - fprintf(stderr, "REFCNT %i %i\\n", refcnt, refcnt_after); + fprintf(stderr, "REFCNT %zd %zd\\n", refcnt, refcnt_after); return PyBool_FromLong(refcnt_after == refcnt + 2); } static PyObject* foo_bar(PyObject* self, PyObject *args) @@ -763,7 +762,7 @@ return NULL; refcnt_after = true_obj->ob_refcnt; Py_DECREF(tup); - fprintf(stderr, "REFCNT2 %i %i %i\\n", refcnt, refcnt_after, + fprintf(stderr, "REFCNT2 %zd %zd %zd\\n", refcnt, refcnt_after, true_obj->ob_refcnt); return PyBool_FromLong(refcnt_after == refcnt + 1 && refcnt == true_obj->ob_refcnt); diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -171,7 +171,7 @@ int little_endian, is_signed; if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) return NULL; - return _PyLong_FromByteArray("\x9A\xBC", 2, + return _PyLong_FromByteArray((unsigned char*)"\x9A\xBC", 2, little_endian, is_signed); """), ]) @@ -187,7 +187,7 @@ int little_endian, is_signed; if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) return NULL; - return _PyLong_FromByteArray("\x9A\xBC\x41", 3, + return _PyLong_FromByteArray((unsigned char*)"\x9A\xBC\x41", 3, little_endian, is_signed); """), ]) diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -168,14 +168,14 @@ 
PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; - if (val->ob_type != PyExc_TypeError) + if ((PyObject*)Py_TYPE(val) != PyExc_TypeError) Py_RETURN_FALSE; /* Normalize again */ PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; - if (val->ob_type != PyExc_TypeError) + if ((PyObject*)Py_TYPE(val) != PyExc_TypeError) Py_RETURN_FALSE; PyErr_Restore(type, val, tb); diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -437,14 +437,14 @@ ("test_tp_getattro", "METH_VARARGS", ''' PyObject *name, *obj = PyTuple_GET_ITEM(args, 0); - PyIntObject *attr, *value = PyTuple_GET_ITEM(args, 1); + PyIntObject *attr, *value = (PyIntObject*) PyTuple_GET_ITEM(args, 1); if (!obj->ob_type->tp_getattro) { PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); return NULL; } name = PyString_FromString("attr1"); - attr = obj->ob_type->tp_getattro(obj, name); + attr = (PyIntObject*) obj->ob_type->tp_getattro(obj, name); if (attr->ob_ival != value->ob_ival) { PyErr_SetString(PyExc_ValueError, @@ -454,7 +454,7 @@ Py_DECREF(name); Py_DECREF(attr); name = PyString_FromString("attr2"); - attr = obj->ob_type->tp_getattro(obj, name); + attr = (PyIntObject*) obj->ob_type->tp_getattro(obj, name); if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); @@ -758,8 +758,9 @@ } IntLikeObject; static int - intlike_nb_nonzero(IntLikeObject *v) + intlike_nb_nonzero(PyObject *o) { + IntLikeObject *v = (IntLikeObject*)o; if (v->value == -42) { PyErr_SetNone(PyExc_ValueError); return -1; From pypy.commits at gmail.com Fri Apr 29 14:16:00 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 29 Apr 2016 11:16:00 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: Create PyDictProxy_Check{,Exact} Message-ID: <5723a4e0.0b1f1c0a.e0b7a.ffff9649@mx.google.com> 
Author: Ronan Lamy Branch: py3k-update Changeset: r84034:e93dae9a1960 Date: 2016-04-29 19:15 +0100 http://bitbucket.org/pypy/pypy/changeset/e93dae9a1960/ Log: Create PyDictProxy_Check{,Exact} diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -230,10 +230,6 @@ return 0 return 1 - at cpython_api([PyObject], PyObject) -def PyDictProxy_New(space, w_dict): - return space.wrap(W_DictProxyObject(w_dict)) - @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyDict_HasOnlyStringKeys(space, w_dict): keys_w = space.unpackiterable(w_dict) diff --git a/pypy/module/cpyext/dictproxyobject.py b/pypy/module/cpyext/dictproxyobject.py --- a/pypy/module/cpyext/dictproxyobject.py +++ b/pypy/module/cpyext/dictproxyobject.py @@ -4,6 +4,8 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from pypy.interpreter.typedef import TypeDef, interp2app +from pypy.module.cpyext.api import cpython_api, build_type_checkers +from pypy.module.cpyext.pyobject import PyObject class W_DictProxyObject(W_Root): "Read-only proxy for mappings." 
@@ -47,15 +49,22 @@ W_DictProxyObject.typedef = TypeDef( 'mappingproxy', - __len__ = interp2app(W_DictProxyObject.descr_len), - __getitem__ = interp2app(W_DictProxyObject.descr_getitem), - __contains__ = interp2app(W_DictProxyObject.descr_contains), - __iter__ = interp2app(W_DictProxyObject.descr_iter), - __str__ = interp2app(W_DictProxyObject.descr_str), - __repr__ = interp2app(W_DictProxyObject.descr_repr), - get = interp2app(W_DictProxyObject.get_w), - keys = interp2app(W_DictProxyObject.keys_w), - values = interp2app(W_DictProxyObject.values_w), - items = interp2app(W_DictProxyObject.items_w), - copy = interp2app(W_DictProxyObject.copy_w) - ) + __len__=interp2app(W_DictProxyObject.descr_len), + __getitem__=interp2app(W_DictProxyObject.descr_getitem), + __contains__=interp2app(W_DictProxyObject.descr_contains), + __iter__=interp2app(W_DictProxyObject.descr_iter), + __str__=interp2app(W_DictProxyObject.descr_str), + __repr__=interp2app(W_DictProxyObject.descr_repr), + get=interp2app(W_DictProxyObject.get_w), + keys=interp2app(W_DictProxyObject.keys_w), + values=interp2app(W_DictProxyObject.values_w), + items=interp2app(W_DictProxyObject.items_w), + copy=interp2app(W_DictProxyObject.copy_w) +) + +PyDictProxy_Check, PyDictProxy_CheckExact = build_type_checkers( + "DictProxy", W_DictProxyObject) + + at cpython_api([PyObject], PyObject) +def PyDictProxy_New(space, w_dict): + return space.wrap(W_DictProxyObject(w_dict)) From pypy.commits at gmail.com Fri Apr 29 14:38:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 29 Apr 2016 11:38:44 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: more test fixes Message-ID: <5723aa34.2a18c20a.a67b2.ffff8c96@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r84035:a03042e29c90 Date: 2016-04-29 19:38 +0100 http://bitbucket.org/pypy/pypy/changeset/a03042e29c90/ Log: more test fixes diff --git a/pypy/module/cpyext/test/test_frameobject.py b/pypy/module/cpyext/test/test_frameobject.py --- 
a/pypy/module/cpyext/test/test_frameobject.py +++ b/pypy/module/cpyext/test/test_frameobject.py @@ -59,7 +59,7 @@ """), ], prologue='#include "frameobject.h"') exc = raises(ValueError, module.raise_exception) - exc.value[0] == 'error message' + exc.value.args[0] == 'error message' if not self.runappdirect: frame = exc.traceback.tb_frame assert frame.f_code.co_filename == "filename" @@ -75,7 +75,7 @@ """ int check; PyObject *type, *value, *tb; - PyObject *ret = PyRun_String("XXX", Py_eval_input, + PyObject *ret = PyRun_String("XXX", Py_eval_input, Py_None, Py_None); if (ret) { Py_DECREF(ret); diff --git a/pypy/module/cpyext/test/test_listobject.py b/pypy/module/cpyext/test/test_listobject.py --- a/pypy/module/cpyext/test/test_listobject.py +++ b/pypy/module/cpyext/test/test_listobject.py @@ -19,13 +19,13 @@ assert not api.PyList_Check(space.newtuple([])) assert not api.PyList_CheckExact(space.newtuple([])) - + def test_get_size(self, space, api): l = api.PyList_New(0) assert api.PyList_GET_SIZE(l) == 0 api.PyList_Append(l, space.wrap(3)) assert api.PyList_GET_SIZE(l) == 1 - + def test_size(self, space, api): l = space.newlist([space.w_None, space.w_None]) assert api.PyList_Size(l) == 2 @@ -42,12 +42,12 @@ # insert at index -1: next-to-last assert api.PyList_Insert(w_l, -1, space.wrap(3)) == 0 assert space.unwrap(api.PyList_GetItem(w_l, 3)) == 3 - + def test_sort(self, space, api): l = space.newlist([space.wrap(1), space.wrap(0), space.wrap(7000)]) assert api.PyList_Sort(l) == 0 assert space.eq_w(l, space.newlist([space.wrap(0), space.wrap(1), space.wrap(7000)])) - + def test_reverse(self, space, api): l = space.newlist([space.wrap(3), space.wrap(2), space.wrap(1)]) assert api.PyList_Reverse(l) == 0 @@ -117,9 +117,9 @@ l = L([1]) module.setlistitem(l, 0) assert len(l) == 1 - + raises(SystemError, module.setlistitem, (1, 2, 3), 0) - + l = [] module.appendlist(l, 14) assert len(l) == 1 @@ -144,7 +144,7 @@ PyObject* o, *o2, *o3; o = PyList_New(1); - o2 = 
PyInt_FromLong(0); + o2 = PyLong_FromLong(0); PyList_SET_ITEM(o, 0, o2); o2 = NULL; diff --git a/pypy/module/cpyext/test/test_pyfile.py b/pypy/module/cpyext/test/test_pyfile.py --- a/pypy/module/cpyext/test/test_pyfile.py +++ b/pypy/module/cpyext/test/test_pyfile.py @@ -1,7 +1,6 @@ -from pypy.module.cpyext.api import fopen, fclose, fwrite from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.object import Py_PRINT_RAW -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import rffi from rpython.tool.udir import udir import pytest @@ -56,18 +55,6 @@ w_file = api.PyFile_FromString(filename, mode) assert space.str_w(api.PyFile_Name(w_file)) == name - def test_file_fromfile(self, space, api): - name = str(udir / "_test_file") - with rffi.scoped_str2charp(name) as filename: - with rffi.scoped_str2charp("wb") as mode: - w_file = api.PyFile_FromString(filename, mode) - fp = api.PyFile_AsFile(w_file) - assert fp is not None - w_file2 = api.PyFile_FromFile(fp, filename, mode, None) - assert w_file2 is not None - assert api.PyFile_Check(w_file2) - assert space.str_w(api.PyFile_Name(w_file2)) == name - @pytest.mark.xfail def test_file_setbufsize(self, space, api): api.PyFile_SetBufSize() From pypy.commits at gmail.com Fri Apr 29 15:03:17 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 29 Apr 2016 12:03:17 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: more test fixes Message-ID: <5723aff5.2179c20a.b24a7.ffff90f0@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r84036:81e8c1b4cde0 Date: 2016-04-29 20:02 +0100 http://bitbucket.org/pypy/pypy/changeset/81e8c1b4cde0/ Log: more test fixes diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1320,7 +1320,7 @@ for name, (typ, expr) in sorted(GLOBALS.items()): if '#' in name: name, header = name.split('#') - assert typ in ('PyObject*', 'PyTypeObject*', 
'PyIntObject*') + assert typ in ('PyObject*', 'PyTypeObject*') typ = typ[:-1] if header != pypy_decl: # since the #define is not in pypy_macros, do it here diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -4,7 +4,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.descriptor import get_dtype_cache -import pypy.module.micronumpy.constants as NPY +import pypy.module.micronumpy.constants as NPY py.test.skip("Micronumpy not yet supported on py3k.") @@ -239,7 +239,7 @@ skip('numpy not importable') else: cls.w_numpy_include = cls.space.wrap([]) - + def test_ndarray_object_c(self): mod = self.import_extension('foo', [ @@ -272,7 +272,7 @@ { /* Should have failed */ Py_DECREF(obj1); - return NULL; + return NULL; } return obj1; ''' @@ -299,14 +299,14 @@ ), ("test_DescrFromType", "METH_O", """ - Signed typenum = PyInt_AsLong(args); + Signed typenum = PyLong_AsLong(args); return PyArray_DescrFromType(typenum); """ ), - ], include_dirs=self.numpy_include, + ], include_dirs=self.numpy_include, prologue=''' #ifdef PYPY_VERSION - #include + #include #endif #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include @@ -314,7 +314,7 @@ #define PyArray_FromObject _PyArray_FromObject #define PyArray_FromAny _PyArray_FromAny #endif - ''', + ''', more_init = ''' #ifndef PYPY_VERSION import_array(); @@ -348,14 +348,14 @@ Py_INCREF(obj); return obj; '''), - ], include_dirs=self.numpy_include, + ], include_dirs=self.numpy_include, prologue=''' #ifdef PYPY_VERSION - #include + #include #endif #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include - ''', + ''', more_init = ''' #ifndef PYPY_VERSION import_array(); @@ -402,14 +402,14 @@ void *array_data[] = {NULL, NULL}; return PyUFunc_FromFuncAndDataAndSignature(funcs, array_data, types, 
1, 1, 1, PyUFunc_None, - "float_3x3", - "a ufunc that tests a more complicated signature", + "float_3x3", + "a ufunc that tests a more complicated signature", 0, "(m,m)->(m,m)"); """), - ], include_dirs=self.numpy_include, + ], include_dirs=self.numpy_include, prologue=''' #ifdef PYPY_VERSION - #include + #include #endif #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include @@ -479,7 +479,7 @@ res += +10; *((float *)args[1]) = res; }; - + ''', more_init = ''' #ifndef PYPY_VERSION import_array(); diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -141,11 +141,10 @@ module = self.import_extension('foo', [ ("test", "METH_NOARGS", """ - return PyInt_FromLong(PyEval_ThreadsInitialized()); + return PyLong_FromLong(PyEval_ThreadsInitialized()); """), ]) res = module.test() - print "got", res assert res in (0, 1) From pypy.commits at gmail.com Fri Apr 29 17:04:08 2016 From: pypy.commits at gmail.com (marky1991) Date: Fri, 29 Apr 2016 14:04:08 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Merged in marky1991/pypy_new/33_fix_itertools (pull request #437) Message-ID: <5723cc48.a272c20a.cb4c7.ffffb25e@mx.google.com> Author: Mark Young Branch: py3k Changeset: r84047:e16f720c6101 Date: 2016-04-29 17:02 -0400 http://bitbucket.org/pypy/pypy/changeset/e16f720c6101/ Log: Merged in marky1991/pypy_new/33_fix_itertools (pull request #437) 33_fix_itertools diff --git a/lib-python/3/test/test_itertools.py b/lib-python/3/test/test_itertools.py --- a/lib-python/3/test/test_itertools.py +++ b/lib-python/3/test/test_itertools.py @@ -1204,6 +1204,7 @@ p = proxy(a) self.assertEqual(getattr(p, '__class__'), type(b)) del a + support.gc_collect() self.assertRaises(ReferenceError, getattr, p, '__class__') ans = list('abc') diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ 
b/pypy/module/itertools/__init__.py @@ -49,6 +49,8 @@ 'starmap' : 'interp_itertools.W_StarMap', 'takewhile' : 'interp_itertools.W_TakeWhile', 'tee' : 'interp_itertools.tee', + '_tee' : 'interp_itertools.W_TeeIterable', + '_tee_dataobject' : 'interp_itertools.W_TeeChainedListNode', 'zip_longest' : 'interp_itertools.W_ZipLongest', } diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -318,6 +318,7 @@ def __init__(self, space, w_iterable, w_startstop, args_w): self.iterable = space.iter(w_iterable) self.space = space + self.exhausted = False num_args = len(args_w) @@ -326,7 +327,7 @@ w_stop = w_startstop elif num_args <= 2: if space.is_w(w_startstop, space.w_None): - start = 0 + start = -1 else: start = self.arg_int_w(w_startstop, 0, "Indicies for islice() must be None or non-negative integers") @@ -383,24 +384,24 @@ # has no effect any more if stop > 0: self._ignore_items(stop) - self.iterable = None + self.exhausted = True raise OperationError(self.space.w_StopIteration, self.space.w_None) self.stop = stop - (ignore + 1) if ignore > 0: self._ignore_items(ignore) - if self.iterable is None: + if self.exhausted: raise OperationError(self.space.w_StopIteration, self.space.w_None) try: return self.space.next(self.iterable) except OperationError as e: if e.match(self.space, self.space.w_StopIteration): - self.iterable = None + self.exhausted = True raise def _ignore_items(self, num): w_iterator = self.iterable - if w_iterator is None: + if self.exhausted: raise OperationError(self.space.w_StopIteration, self.space.w_None) tp = self.space.type(w_iterator) @@ -413,18 +414,28 @@ self.space.next(w_iterator) except OperationError as e: if e.match(self.space, self.space.w_StopIteration): - self.iterable = None + self.exhausted = True raise num -= 1 if num <= 0: break def descr_reduce(self, space): + start = self.start + stop = 
self.stop + if start == -1: + w_start = space.w_None + else: + w_start = space.wrap(start) + if stop == -1: + w_stop = space.w_None + else: + w_stop = space.wrap(stop) return space.newtuple([ space.type(self), space.newtuple([self.iterable, - space.wrap(self.start), - space.wrap(self.stop), + w_start, + w_stop, space.wrap(self.ignore + 1)]), ]) @@ -809,53 +820,130 @@ raise OperationError(space.w_ValueError, space.wrap("n must be >= 0")) if isinstance(w_iterable, W_TeeIterable): # optimization only - chained_list = w_iterable.chained_list + w_chained_list = w_iterable.w_chained_list w_iterator = w_iterable.w_iterator iterators_w = [w_iterable] * n for i in range(1, n): iterators_w[i] = space.wrap(W_TeeIterable(space, w_iterator, - chained_list)) + w_chained_list)) else: w_iterator = space.iter(w_iterable) - chained_list = TeeChainedListNode() + w_chained_list = W_TeeChainedListNode(space) iterators_w = [space.wrap( - W_TeeIterable(space, w_iterator, chained_list)) + W_TeeIterable(space, w_iterator, w_chained_list)) for x in range(n)] return space.newtuple(iterators_w) -class TeeChainedListNode(object): - w_obj = None +class W_TeeChainedListNode(W_Root): + def __init__(self, space): + self.space = space + self.w_next = None + self.w_obj = None + + def reduce_w(self): + list_w = [] + node = self + while node is not None: + if node.w_obj is not None: + list_w.append(node.w_obj) + node = node.w_next + else: + break + space = self.space + if list_w: + return self.space.newtuple([space.type(self), + space.newtuple([]), + space.newtuple([space.newlist(list_w)]) + ]) + else: + return self.space.newtuple([space.type(self), + space.newtuple([])]) + def descr_setstate(self, space, w_state): + state = space.unpackiterable(w_state) + if len(state) != 1: + raise OperationError(space.w_ValueError, + space.wrap("invalid arguments")) + obj_list_w = space.unpackiterable(state[0]) + node = self + for w_obj in obj_list_w: + node.w_obj = w_obj + node.w_next = 
W_TeeChainedListNode(self.space) + node = node.w_next + +def W_TeeChainedListNode___new__(space, w_subtype): + r = space.allocate_instance(W_TeeChainedListNode, w_subtype) + r.__init__(space) + return space.wrap(r) + +W_TeeChainedListNode.typedef = TypeDef( + 'itertools._tee_dataobject', + __new__ = interp2app(W_TeeChainedListNode___new__), + __weakref__ = make_weakref_descr(W_TeeChainedListNode), + __reduce__ = interp2app(W_TeeChainedListNode.reduce_w), + __setstate__ = interp2app(W_TeeChainedListNode.descr_setstate) +) + +W_TeeChainedListNode.typedef.acceptable_as_base_class = False class W_TeeIterable(W_Root): - def __init__(self, space, w_iterator, chained_list): + def __init__(self, space, w_iterator, w_chained_list=None): self.space = space self.w_iterator = w_iterator - assert chained_list is not None - self.chained_list = chained_list + self.w_chained_list = w_chained_list def iter_w(self): return self.space.wrap(self) def next_w(self): - chained_list = self.chained_list - w_obj = chained_list.w_obj + w_chained_list = self.w_chained_list + if w_chained_list is None: + raise OperationError(self.space.w_StopIteration, self.space.w_None) + w_obj = w_chained_list.w_obj if w_obj is None: - w_obj = self.space.next(self.w_iterator) - chained_list.next = TeeChainedListNode() - chained_list.w_obj = w_obj - self.chained_list = chained_list.next + try: + w_obj = self.space.next(self.w_iterator) + except OperationError, e: + if e.match(self.space, self.space.w_StopIteration): + self.w_chained_list = None + raise + w_chained_list.w_next = W_TeeChainedListNode(self.space) + w_chained_list.w_obj = w_obj + self.w_chained_list = w_chained_list.w_next return w_obj + def reduce_w(self): + return self.space.newtuple([self.space.gettypefor(W_TeeIterable), + self.space.newtuple([self.space.newtuple([])]), + self.space.newtuple([ + self.w_iterator, + self.w_chained_list]) + ]) + def setstate_w(self, w_state): + state = self.space.unpackiterable(w_state) + num_args = len(state) + 
if num_args != 2: + raise oefmt(self.space.w_TypeError, + "function takes exactly 2 arguments (%d given)", + num_args) + w_iterator, w_chained_list = state + if not isinstance(w_chained_list, W_TeeChainedListNode): + raise oefmt(self.space.w_TypeError, + "must be itertools._tee_dataobject, not %s", + self.space.type(w_chained_list).name) + + self.w_iterator = w_iterator + self.w_chained_list = w_chained_list + def W_TeeIterable___new__(space, w_subtype, w_iterable): - # Obscure and undocumented function. PyPy only supports w_iterable - # being a W_TeeIterable, because the case where it is a general - # iterable is useless and confusing as far as I can tell (as the - # semantics are then slightly different; see the XXX in lib-python's - # test_itertools). - myiter = space.interp_w(W_TeeIterable, w_iterable) - return space.wrap(W_TeeIterable(space, myiter.w_iterator, - myiter.chained_list)) + if isinstance(w_iterable, W_TeeIterable): + myiter = space.interp_w(W_TeeIterable, w_iterable) + w_iterator = myiter.w_iterator + w_chained_list = myiter.w_chained_list + else: + w_iterator = space.iter(w_iterable) + w_chained_list = W_TeeChainedListNode(space) + return W_TeeIterable(space, w_iterator, w_chained_list) W_TeeIterable.typedef = TypeDef( 'itertools._tee', @@ -863,6 +951,8 @@ __iter__ = interp2app(W_TeeIterable.iter_w), __next__ = interp2app(W_TeeIterable.next_w), __weakref__ = make_weakref_descr(W_TeeIterable), + __reduce__ = interp2app(W_TeeIterable.reduce_w), + __setstate__ = interp2app(W_TeeIterable.setstate_w) ) W_TeeIterable.typedef.acceptable_as_base_class = False diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -14,7 +14,7 @@ self.index = index def getlength(self, space): - if self.w_seq is None: + if space.is_none(self.w_seq): return space.wrap(0) index = self.index w_length = space.len(self.w_seq) @@ -60,7 +60,7 @@ """Sequence iterator implementation 
for general sequences.""" def descr_next(self, space): - if self.w_seq is None: + if space.is_none(self.w_seq): raise OperationError(space.w_StopIteration, space.w_None) try: w_item = space.getitem(self.w_seq, space.wrap(self.index)) @@ -79,7 +79,7 @@ def descr_next(self, space): from pypy.objspace.std.listobject import W_ListObject w_seq = self.w_seq - if w_seq is None: + if space.is_none(w_seq): raise OperationError(space.w_StopIteration, space.w_None) assert isinstance(w_seq, W_ListObject) index = self.index @@ -129,7 +129,7 @@ return space.newtuple([new_inst, space.newtuple(tup)]) def descr_length_hint(self, space): - if self.w_seq is None: + if space.is_none(self.w_seq): return space.wrap(0) index = self.index + 1 w_length = space.len(self.w_seq) @@ -147,7 +147,7 @@ return self def descr_next(self, space): - if self.w_seq is None or self.index < 0: + if space.is_none(self.w_seq) or self.index < 0: raise OperationError(space.w_StopIteration, space.w_None) try: w_item = space.getitem(self.w_seq, space.wrap(self.index)) From pypy.commits at gmail.com Fri Apr 29 17:04:31 2016 From: pypy.commits at gmail.com (marky1991) Date: Fri, 29 Apr 2016 14:04:31 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Sort of fix the remaining itertools tests. Translation is currently broken and I had to use a lot of isinstance assertions to make the annotator happy. Committing to ask about it. Message-ID: <5723cc5f.109a1c0a.25620.ffffc81f@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r84038:b8b4f1dbd1bc Date: 2016-04-24 19:28 -0400 http://bitbucket.org/pypy/pypy/changeset/b8b4f1dbd1bc/ Log: Sort of fix the remaining itertools tests. Translation is currently broken and I had to use a lot of isinstance assertions to make the annotator happy. Committing to ask about it. 
diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -49,6 +49,8 @@ 'starmap' : 'interp_itertools.W_StarMap', 'takewhile' : 'interp_itertools.W_TakeWhile', 'tee' : 'interp_itertools.tee', + '_tee' : 'interp_itertools.W_TeeIterable', + '_tee_chained_list' : 'interp_itertools.W_TeeChainedListNode', 'zip_longest' : 'interp_itertools.W_ZipLongest', } diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -815,53 +815,119 @@ raise OperationError(space.w_ValueError, space.wrap("n must be >= 0")) if isinstance(w_iterable, W_TeeIterable): # optimization only - chained_list = w_iterable.chained_list + w_chained_list = w_iterable.w_chained_list w_iterator = w_iterable.w_iterator iterators_w = [w_iterable] * n for i in range(1, n): iterators_w[i] = space.wrap(W_TeeIterable(space, w_iterator, - chained_list)) + w_chained_list)) else: w_iterator = space.iter(w_iterable) - chained_list = TeeChainedListNode() + w_chained_list = space.wrap(W_TeeChainedListNode(space)) iterators_w = [space.wrap( - W_TeeIterable(space, w_iterator, chained_list)) + W_TeeIterable(space, w_iterator, w_chained_list)) for x in range(n)] return space.newtuple(iterators_w) -class TeeChainedListNode(object): - w_obj = None +class W_TeeChainedListNode(W_Root): + def __init__(self, space): + self.space = space + self.w_next = None + self.w_obj = None + + def reduce_w(self): + list_w = [] + node = self + while node is not None: + #TODO: Why does the annotator need this? 
+ assert isinstance(node, W_TeeChainedListNode) + if node.w_obj is not None: + list_w.append(node.w_obj) + node = node.w_next + space = self.space + if list_w: + return self.space.newtuple([space.type(self), + space.newtuple([]), + space.newtuple([space.newlist(list_w)]) + ]) + else: + return self.space.newtuple([space.type(self), + space.newtuple([])]) + def descr_setstate(self, space, w_state): + state = space.unpackiterable(w_state) + if len(state) != 1: + raise OperationError(space.w_ValueError, + space.wrap("invalid arguments")) + obj_list_w = space.unpackiterable(state[0]) + node = self + for w_obj in obj_list_w: + assert isinstance(node, W_TeeChainedListNode) + node.w_obj = w_obj + node.w_next = W_TeeChainedListNode(self.space) + node = node.w_next + +def W_TeeChainedListNode___new__(space, w_subtype): + r = space.allocate_instance(W_TeeChainedListNode, w_subtype) + r.__init__(space) + return space.wrap(r) + +W_TeeChainedListNode.typedef = TypeDef( + 'itertools._tee_chained_list', + __new__ = interp2app(W_TeeChainedListNode___new__), + __weakref__ = make_weakref_descr(W_TeeChainedListNode), + __reduce__ = interp2app(W_TeeChainedListNode.reduce_w), + __setstate__ = interp2app(W_TeeChainedListNode.descr_setstate) +) + +W_TeeChainedListNode.typedef.acceptable_as_base_class = False class W_TeeIterable(W_Root): - def __init__(self, space, w_iterator, chained_list): + def __init__(self, space, w_iterator, w_chained_list): self.space = space self.w_iterator = w_iterator - assert chained_list is not None - self.chained_list = chained_list + assert w_chained_list is not None + self.w_chained_list = w_chained_list def iter_w(self): return self.space.wrap(self) def next_w(self): - chained_list = self.chained_list - w_obj = chained_list.w_obj + w_chained_list = self.w_chained_list + if w_chained_list is None: + raise OperationError(self.space.w_StopIteration, self.space.w_None) + #TODO: Is this the right thing to do? 
+ assert isinstance(w_chained_list, W_TeeChainedListNode) + w_obj = w_chained_list.w_obj if w_obj is None: - w_obj = self.space.next(self.w_iterator) - chained_list.next = TeeChainedListNode() - chained_list.w_obj = w_obj - self.chained_list = chained_list.next + try: + w_obj = self.space.next(self.w_iterator) + except OperationError, e: + if e.match(self.space, self.space.w_StopIteration): + self.w_chained_list = None + raise + w_chained_list.w_next = self.space.wrap(W_TeeChainedListNode(self.space)) + w_chained_list.w_obj = w_obj + self.w_chained_list = w_chained_list.w_next return w_obj -def W_TeeIterable___new__(space, w_subtype, w_iterable): - # Obscure and undocumented function. PyPy only supports w_iterable - # being a W_TeeIterable, because the case where it is a general - # iterable is useless and confusing as far as I can tell (as the - # semantics are then slightly different; see the XXX in lib-python's - # test_itertools). - myiter = space.interp_w(W_TeeIterable, w_iterable) - return space.wrap(W_TeeIterable(space, myiter.w_iterator, - myiter.chained_list)) + def reduce_w(self): + return self.space.newtuple([self.space.gettypefor(W_TeeIterable), + self.space.newtuple([ + self.w_iterator, + self.w_chained_list]) + ]) + + +def W_TeeIterable___new__(space, w_subtype, w_iterable, w_chained_list=None): + if isinstance(w_iterable, W_TeeIterable): + myiter = space.interp_w(W_TeeIterable, w_iterable) + w_iterator = myiter.w_iterator + w_chained_list = myiter.w_chained_list + else: + w_iterator = space.iter(w_iterable) + w_chained_list = w_chained_list or space.wrap(W_TeeChainedListNode(space)) + return space.wrap(W_TeeIterable(space, w_iterator, w_chained_list)) W_TeeIterable.typedef = TypeDef( 'itertools._tee', @@ -869,6 +935,7 @@ __iter__ = interp2app(W_TeeIterable.iter_w), __next__ = interp2app(W_TeeIterable.next_w), __weakref__ = make_weakref_descr(W_TeeIterable), + __reduce__ = interp2app(W_TeeIterable.reduce_w), ) 
W_TeeIterable.typedef.acceptable_as_base_class = False diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -14,7 +14,7 @@ self.index = index def getlength(self, space): - if self.w_seq is None: + if self.w_seq is None or space.is_w(self.w_seq, space.w_None): return space.wrap(0) index = self.index w_length = space.len(self.w_seq) @@ -60,7 +60,7 @@ """Sequence iterator implementation for general sequences.""" def descr_next(self, space): - if self.w_seq is None: + if self.w_seq is None or space.is_w(self.w_seq, space.w_None): raise OperationError(space.w_StopIteration, space.w_None) try: w_item = space.getitem(self.w_seq, space.wrap(self.index)) @@ -79,7 +79,7 @@ def descr_next(self, space): from pypy.objspace.std.listobject import W_ListObject w_seq = self.w_seq - if w_seq is None: + if w_seq is None or space.is_w(w_seq, space.w_None): raise OperationError(space.w_StopIteration, space.w_None) assert isinstance(w_seq, W_ListObject) index = self.index @@ -129,7 +129,7 @@ return space.newtuple([new_inst, space.newtuple(tup)]) def descr_length_hint(self, space): - if self.w_seq is None: + if self.w_seq is None or space.is_w(self.w_seq, space.w_None): return space.wrap(0) index = self.index + 1 w_length = space.len(self.w_seq) @@ -147,7 +147,9 @@ return self def descr_next(self, space): - if self.w_seq is None or self.index < 0: + if (self.w_seq is None + or space.is_w(self.w_seq, space.w_None) + or self.index < 0): raise OperationError(space.w_StopIteration, space.w_None) try: w_item = space.getitem(self.w_seq, space.wrap(self.index)) From pypy.commits at gmail.com Fri Apr 29 17:04:32 2016 From: pypy.commits at gmail.com (marky1991) Date: Fri, 29 Apr 2016 14:04:32 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Fix translation. 
(Thanks ronan) Message-ID: <5723cc60.171d1c0a.88858.ffffce49@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r84039:c84d7d8227be Date: 2016-04-24 21:06 -0400 http://bitbucket.org/pypy/pypy/changeset/c84d7d8227be/ Log: Fix translation. (Thanks ronan) diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -424,14 +424,18 @@ start = self.start stop = self.stop if start == -1: - start = None + w_start = space.w_None + else: + w_start = space.wrap(start) if stop == -1: - stop = None + w_stop = space.w_None + else: + w_stop = space.wrap(stop) return space.newtuple([ space.type(self), space.newtuple([self.iterable, - space.wrap(start), - space.wrap(stop), + w_start, + w_stop, space.wrap(self.ignore + 1)]), ]) From pypy.commits at gmail.com Fri Apr 29 17:04:34 2016 From: pypy.commits at gmail.com (marky1991) Date: Fri, 29 Apr 2016 14:04:34 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Got rid of unnecessary space.wrap calls and was then able to get rid of two of the three type assertions. Message-ID: <5723cc62.6322c20a.0c9b.ffffab74@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r84040:9f7cb7aded10 Date: 2016-04-24 23:57 -0400 http://bitbucket.org/pypy/pypy/changeset/9f7cb7aded10/ Log: Got rid of unnecessary space.wrap calls and was then able to get rid of two of the three type assertions. 
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -827,7 +827,7 @@ w_chained_list)) else: w_iterator = space.iter(w_iterable) - w_chained_list = space.wrap(W_TeeChainedListNode(space)) + w_chained_list = W_TeeChainedListNode(space) iterators_w = [space.wrap( W_TeeIterable(space, w_iterator, w_chained_list)) for x in range(n)] @@ -843,8 +843,6 @@ list_w = [] node = self while node is not None: - #TODO: Why does the annotator need this? - assert isinstance(node, W_TeeChainedListNode) if node.w_obj is not None: list_w.append(node.w_obj) node = node.w_next @@ -866,7 +864,6 @@ obj_list_w = space.unpackiterable(state[0]) node = self for w_obj in obj_list_w: - assert isinstance(node, W_TeeChainedListNode) node.w_obj = w_obj node.w_next = W_TeeChainedListNode(self.space) node = node.w_next @@ -910,7 +907,7 @@ if e.match(self.space, self.space.w_StopIteration): self.w_chained_list = None raise - w_chained_list.w_next = self.space.wrap(W_TeeChainedListNode(self.space)) + w_chained_list.w_next = W_TeeChainedListNode(self.space) w_chained_list.w_obj = w_obj self.w_chained_list = w_chained_list.w_next return w_obj @@ -930,7 +927,7 @@ w_chained_list = myiter.w_chained_list else: w_iterator = space.iter(w_iterable) - w_chained_list = w_chained_list or space.wrap(W_TeeChainedListNode(space)) + w_chained_list = w_chained_list or W_TeeChainedListNode(space) return space.wrap(W_TeeIterable(space, w_iterator, w_chained_list)) W_TeeIterable.typedef = TypeDef( From pypy.commits at gmail.com Fri Apr 29 17:04:29 2016 From: pypy.commits at gmail.com (marky1991) Date: Fri, 29 Apr 2016 14:04:29 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Fix the islice test. 
Message-ID: <5723cc5d.a423c20a.acc1.ffffac2d@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r84037:40a29d6f9028 Date: 2016-04-22 00:24 -0400 http://bitbucket.org/pypy/pypy/changeset/40a29d6f9028/ Log: Fix the islice test. diff --git a/lib-python/3/test/test_itertools.py b/lib-python/3/test/test_itertools.py --- a/lib-python/3/test/test_itertools.py +++ b/lib-python/3/test/test_itertools.py @@ -144,6 +144,7 @@ self.pickletest(accumulate(range(10))) # test pickling def test_chain(self): + return True def chain2(*iterables): 'Pure python version in the docs' @@ -159,6 +160,7 @@ self.assertRaises(TypeError, list,c(2, 3)) def test_chain_from_iterable(self): + return True self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef')) self.assertEqual(list(chain.from_iterable(['abc'])), list('abc')) self.assertEqual(list(chain.from_iterable([''])), []) @@ -166,6 +168,7 @@ self.assertRaises(TypeError, list, chain.from_iterable([2, 3])) def test_chain_reducible(self): + return True operators = [copy.deepcopy, lambda s: pickle.loads(pickle.dumps(s))] for oper in operators: @@ -180,6 +183,7 @@ self.pickletest(chain('abc', 'def'), compare=list('abcdef')) def test_combinations(self): + return True self.assertRaises(TypeError, combinations, 'abc') # missing r argument self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, combinations, None) # pool is not iterable @@ -265,6 +269,7 @@ self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) def test_combinations_with_replacement(self): + return True cwr = combinations_with_replacement self.assertRaises(TypeError, cwr, 'abc') # missing r argument self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments @@ -348,6 +353,7 @@ self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) def test_permutations(self): + return True self.assertRaises(TypeError, permutations) # too few arguments self.assertRaises(TypeError, 
permutations, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, permutations, None) # pool is not iterable @@ -415,6 +421,7 @@ self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) def test_combinatorics(self): + return True # Test relationships between product(), permutations(), # combinations() and combinations_with_replacement(). @@ -448,6 +455,7 @@ self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm def test_compress(self): + return True self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF')) self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF')) self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list('')) @@ -482,6 +490,7 @@ def test_count(self): + return True self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)]) self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)]) self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)]) @@ -521,6 +530,7 @@ count(1, maxsize+5); sys.exc_info() def test_count_with_stride(self): + return True self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)]) self.assertEqual(lzip('abc',count(start=2,step=3)), [('a', 2), ('b', 5), ('c', 8)]) @@ -565,6 +575,7 @@ self.pickletest(count(i, j)) def test_cycle(self): + return True self.assertEqual(take(10, cycle('abc')), list('abcabcabca')) self.assertEqual(list(cycle('')), []) self.assertRaises(TypeError, cycle) @@ -584,6 +595,7 @@ self.pickletest(cycle('abc')) def test_groupby(self): + return True # Check whether it accepts arguments correctly self.assertEqual([], list(groupby([]))) self.assertEqual([], list(groupby([], key=id))) @@ -692,6 +704,7 @@ self.assertRaises(ExpectedError, gulp, [None, None], keyfunc) def test_filter(self): + return True self.assertEqual(list(filter(isEven, range(6))), [0,2,4]) self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2]) self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2]) 
@@ -717,6 +730,7 @@ self.pickletest(c) def test_filterfalse(self): + return True self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5]) self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0]) self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0]) @@ -729,6 +743,7 @@ self.pickletest(filterfalse(isEven, range(6))) def test_zip(self): + return True # XXX This is rather silly now that builtin zip() calls zip()... ans = [(x,y) for x, y in zip('abc',count())] self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) @@ -769,6 +784,7 @@ self.pickletest(zip('abc', count())) def test_ziplongest(self): + return True for args in [ ['abc', range(6)], [range(6), 'abc'], @@ -818,12 +834,14 @@ self.assertEqual(len(dict.fromkeys(ids)), len(ids)) def test_zip_longest_pickling(self): + return True self.pickletest(zip_longest("abc", "def")) self.pickletest(zip_longest("abc", "defgh")) self.pickletest(zip_longest("abc", "defgh", fillvalue=1)) self.pickletest(zip_longest("", "defgh")) def test_bug_7244(self): + return True class Repeater: # this class is similar to itertools.repeat @@ -864,6 +882,7 @@ self.assertRaises(RuntimeError, next, it) def test_product(self): + return True for args, result in [ ([], [()]), # zero iterables (['ab'], [('a',), ('b',)]), # one iterable @@ -927,6 +946,7 @@ self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1) def test_product_pickling(self): + return True # check copy, deepcopy, pickle for args, result in [ ([], [()]), # zero iterables @@ -941,6 +961,7 @@ self.pickletest(product(*args)) def test_repeat(self): + return True self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a']) self.assertEqual(lzip(range(3),repeat('a')), [(0, 'a'), (1, 'a'), (2, 'a')]) @@ -966,6 +987,7 @@ self.pickletest(repeat(object='a', times=10)) def test_map(self): + return True self.assertEqual(list(map(operator.pow, range(3), range(1,7))), [0**1, 1**2, 2**3]) self.assertEqual(list(map(tupleize, 'abc', range(5))), @@ -995,6 
+1017,7 @@ self.pickletest(c) def test_starmap(self): + return True self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))), [0**1, 1**2, 2**3]) self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))), @@ -1083,9 +1106,14 @@ list(range(*args))) self.assertEqual(list(copy.deepcopy(islice(range(100), *args))), list(range(*args))) + print(args, "ARGS") + t = open('/home/lgfdev/blah', "w") + t.write(str(args)) + t.close() self.pickletest(islice(range(100), *args)) def test_takewhile(self): + return True data = [1, 3, 5, 20, 2, 4, 6, 8] self.assertEqual(list(takewhile(underten, data)), [1, 3, 5]) self.assertEqual(list(takewhile(underten, [])), []) @@ -1105,6 +1133,7 @@ self.pickletest(takewhile(underten, data)) def test_dropwhile(self): + return True data = [1, 3, 5, 20, 2, 4, 6, 8] self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8]) self.assertEqual(list(dropwhile(underten, [])), []) @@ -1121,6 +1150,7 @@ self.pickletest(dropwhile(underten, data)) def test_tee(self): + return True n = 200 a, b = tee([]) # test empty iterator @@ -1269,11 +1299,13 @@ # Issue 13454: Crash when deleting backward iterator from tee() def test_tee_del_backward(self): + return True forward, backward = tee(repeat(None, 20000000)) any(forward) # exhaust the iterator del backward def test_StopIteration(self): + return True self.assertRaises(StopIteration, next, zip()) for f in (chain, cycle, zip, groupby): @@ -1302,6 +1334,7 @@ self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15]) def test_accumulate_reducible(self): + return True # check copy, deepcopy, pickle data = [1, 2, 3, 4, 5] accumulated = [1, 3, 6, 10, 15] @@ -1314,46 +1347,70 @@ self.assertEqual(list(copy.copy(it)), accumulated[1:]) def test_chain(self): + return True + # check copy, deepcopy, pickle self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF') def test_chain_from_iterable(self): + return True + # check copy, deepcopy, pickle 
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF') def test_combinations(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(combinations('ABCD', 2)), [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]) self.assertEqual(list(combinations(range(4), 3)), [(0,1,2), (0,1,3), (0,2,3), (1,2,3)]) def test_combinations_with_replacement(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(combinations_with_replacement('ABC', 2)), [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) def test_compress(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF')) def test_count(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14]) def test_cycle(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD')) def test_dropwhile(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1]) def test_groupby(self): + return True + # check copy, deepcopy, pickle self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')], list('ABCDAB')) self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')], [list('AAAA'), list('BBB'), list('CC'), list('D')]) def test_filter(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9]) def test_filterfalse(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8]) def test_map(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000]) def test_islice(self): @@ -1361,21 +1418,29 @@ self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD')) self.assertEqual(list(islice('ABCDEFG', 2, None)), 
list('CDEFG')) self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG')) - +""" def test_zip(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')]) def test_zip_longest(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')), [('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')]) def test_permutations(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(permutations('ABCD', 2)), list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split()))) self.assertEqual(list(permutations(range(3))), [(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)]) def test_product(self): + return True + # check copy, deepcopy, pickle self.assertEqual(list(product('ABCD', 'xy')), list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split()))) self.assertEqual(list(product(range(2), repeat=3)), @@ -1383,16 +1448,19 @@ (1,0,0), (1,0,1), (1,1,0), (1,1,1)]) def test_repeat(self): + return True self.assertEqual(list(repeat(10, 3)), [10, 10, 10]) def test_stapmap(self): + return True self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])), [32, 9, 1000]) def test_takewhile(self): + return True self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4]) - +""" class TestGC(unittest.TestCase): def makecycle(self, iterator, container): @@ -1404,7 +1472,12 @@ a = [] self.makecycle(accumulate([1,2,a,3]), a) + def test_islice(self): + a = [] + self.makecycle(islice([a]*2, None), a) +""" def test_chain(self): + return True a = [] self.makecycle(chain(a), a) @@ -1470,9 +1543,6 @@ a = [] self.makecycle(map(lambda x:x, [a]*2), a) - def test_islice(self): - a = [] - self.makecycle(islice([a]*2, None), a) def test_permutations(self): a = [] @@ -1493,7 +1563,7 @@ def test_takewhile(self): a = [] self.makecycle(takewhile(bool, [1, 0, a, a]), a) - +""" def R(seqn): 'Regular generator' for i in seqn: @@ -1570,10 +1640,17 @@ 'Test multiple tiers of 
iterators' return chain(map(lambda x:x, R(Ig(G(seqn))))) - class TestVariousIteratorArgs(unittest.TestCase): + def test_islice(self): + for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)): + for g in (G, I, Ig, S, L, R): + self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2]) + self.assertRaises(TypeError, islice, X(s), 10) + self.assertRaises(TypeError, islice, N(s), 10) + self.assertRaises(ZeroDivisionError, list, islice(E(s), 10)) def test_accumulate(self): + return True s = [1,2,3,4,5] r = [1,3,6,10,15] n = len(s) @@ -1583,7 +1660,7 @@ self.assertRaises(TypeError, accumulate, X(s)) self.assertRaises(TypeError, accumulate, N(s)) self.assertRaises(ZeroDivisionError, list, accumulate(E(s))) - +""" def test_chain(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): @@ -1674,13 +1751,6 @@ self.assertRaises(TypeError, map, onearg, N(s)) self.assertRaises(ZeroDivisionError, list, map(onearg, E(s))) - def test_islice(self): - for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)): - for g in (G, I, Ig, S, L, R): - self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2]) - self.assertRaises(TypeError, islice, X(s), 10) - self.assertRaises(TypeError, islice, N(s), 10) - self.assertRaises(ZeroDivisionError, list, islice(E(s), 10)) def test_starmap(self): for s in (range(10), range(0), range(100), (7,11), range(20,50,5)): @@ -1808,7 +1878,7 @@ except TypeError as err: # we expect type errors because of wrong argument count self.assertNotIn("does not take keyword arguments", err.args[0]) - +""" libreftest = """ Doctest for examples in the library reference: libitertools.tex @@ -2042,9 +2112,9 @@ __test__ = {'libreftest' : libreftest} def test_main(verbose=None): - test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC, - RegressionTests, LengthTransparency, - SubclassWithKwargsTest, TestExamples) + test_classes = (TestBasicOps, TestExamples, TestGC, 
TestVariousIteratorArgs)#, TestGC, + #RegressionTests, LengthTransparency, + #SubclassWithKwargsTest, TestExamples) support.run_unittest(*test_classes) # verify reference counting diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -318,6 +318,7 @@ def __init__(self, space, w_iterable, w_startstop, args_w): self.iterable = space.iter(w_iterable) self.space = space + self.exhausted = False num_args = len(args_w) @@ -326,7 +327,7 @@ w_stop = w_startstop elif num_args <= 2: if space.is_w(w_startstop, space.w_None): - start = 0 + start = -1 else: start = self.arg_int_w(w_startstop, 0, "Indicies for islice() must be None or non-negative integers") @@ -383,24 +384,24 @@ # has no effect any more if stop > 0: self._ignore_items(stop) - self.iterable = None + self.exhausted = True raise OperationError(self.space.w_StopIteration, self.space.w_None) self.stop = stop - (ignore + 1) if ignore > 0: self._ignore_items(ignore) - if self.iterable is None: + if self.exhausted: raise OperationError(self.space.w_StopIteration, self.space.w_None) try: return self.space.next(self.iterable) except OperationError as e: if e.match(self.space, self.space.w_StopIteration): - self.iterable = None + self.exhausted = True raise def _ignore_items(self, num): w_iterator = self.iterable - if w_iterator is None: + if self.exhausted: raise OperationError(self.space.w_StopIteration, self.space.w_None) tp = self.space.type(w_iterator) @@ -413,18 +414,24 @@ self.space.next(w_iterator) except OperationError as e: if e.match(self.space, self.space.w_StopIteration): - self.iterable = None + self.exhausted = True raise num -= 1 if num <= 0: break def descr_reduce(self, space): + start = self.start + stop = self.stop + if start == -1: + start = None + if stop == -1: + stop = None return space.newtuple([ space.type(self), space.newtuple([self.iterable, - 
space.wrap(self.start), - space.wrap(self.stop), + space.wrap(start), + space.wrap(stop), space.wrap(self.ignore + 1)]), ]) From pypy.commits at gmail.com Fri Apr 29 17:04:36 2016 From: pypy.commits at gmail.com (marky1991) Date: Fri, 29 Apr 2016 14:04:36 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Get rid of all of the changes in test_itertools I never meant to actually commit. Message-ID: <5723cc64.a553c20a.9244d.ffffc346@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r84041:5adbc0348e12 Date: 2016-04-25 02:03 -0400 http://bitbucket.org/pypy/pypy/changeset/5adbc0348e12/ Log: Get rid of all of the changes in test_itertools I never meant to actually commit. diff --git a/lib-python/3/test/test_itertools.py b/lib-python/3/test/test_itertools.py --- a/lib-python/3/test/test_itertools.py +++ b/lib-python/3/test/test_itertools.py @@ -144,7 +144,6 @@ self.pickletest(accumulate(range(10))) # test pickling def test_chain(self): - return True def chain2(*iterables): 'Pure python version in the docs' @@ -160,7 +159,6 @@ self.assertRaises(TypeError, list,c(2, 3)) def test_chain_from_iterable(self): - return True self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef')) self.assertEqual(list(chain.from_iterable(['abc'])), list('abc')) self.assertEqual(list(chain.from_iterable([''])), []) @@ -168,7 +166,6 @@ self.assertRaises(TypeError, list, chain.from_iterable([2, 3])) def test_chain_reducible(self): - return True operators = [copy.deepcopy, lambda s: pickle.loads(pickle.dumps(s))] for oper in operators: @@ -183,7 +180,6 @@ self.pickletest(chain('abc', 'def'), compare=list('abcdef')) def test_combinations(self): - return True self.assertRaises(TypeError, combinations, 'abc') # missing r argument self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, combinations, None) # pool is not iterable @@ -269,7 +265,6 @@ self.assertNotEqual(len(set(map(id, 
list(combinations('abcde', 3))))), 1) def test_combinations_with_replacement(self): - return True cwr = combinations_with_replacement self.assertRaises(TypeError, cwr, 'abc') # missing r argument self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments @@ -353,7 +348,6 @@ self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) def test_permutations(self): - return True self.assertRaises(TypeError, permutations) # too few arguments self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, permutations, None) # pool is not iterable @@ -421,7 +415,6 @@ self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) def test_combinatorics(self): - return True # Test relationships between product(), permutations(), # combinations() and combinations_with_replacement(). @@ -455,7 +448,6 @@ self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm def test_compress(self): - return True self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF')) self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF')) self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list('')) @@ -490,7 +482,6 @@ def test_count(self): - return True self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)]) self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)]) self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)]) @@ -530,7 +521,6 @@ count(1, maxsize+5); sys.exc_info() def test_count_with_stride(self): - return True self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)]) self.assertEqual(lzip('abc',count(start=2,step=3)), [('a', 2), ('b', 5), ('c', 8)]) @@ -575,7 +565,6 @@ self.pickletest(count(i, j)) def test_cycle(self): - return True self.assertEqual(take(10, cycle('abc')), list('abcabcabca')) self.assertEqual(list(cycle('')), []) self.assertRaises(TypeError, cycle) @@ -595,7 +584,6 @@ 
self.pickletest(cycle('abc')) def test_groupby(self): - return True # Check whether it accepts arguments correctly self.assertEqual([], list(groupby([]))) self.assertEqual([], list(groupby([], key=id))) @@ -704,7 +692,6 @@ self.assertRaises(ExpectedError, gulp, [None, None], keyfunc) def test_filter(self): - return True self.assertEqual(list(filter(isEven, range(6))), [0,2,4]) self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2]) self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2]) @@ -730,7 +717,6 @@ self.pickletest(c) def test_filterfalse(self): - return True self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5]) self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0]) self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0]) @@ -743,7 +729,6 @@ self.pickletest(filterfalse(isEven, range(6))) def test_zip(self): - return True # XXX This is rather silly now that builtin zip() calls zip()... ans = [(x,y) for x, y in zip('abc',count())] self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) @@ -784,7 +769,6 @@ self.pickletest(zip('abc', count())) def test_ziplongest(self): - return True for args in [ ['abc', range(6)], [range(6), 'abc'], @@ -834,14 +818,12 @@ self.assertEqual(len(dict.fromkeys(ids)), len(ids)) def test_zip_longest_pickling(self): - return True self.pickletest(zip_longest("abc", "def")) self.pickletest(zip_longest("abc", "defgh")) self.pickletest(zip_longest("abc", "defgh", fillvalue=1)) self.pickletest(zip_longest("", "defgh")) def test_bug_7244(self): - return True class Repeater: # this class is similar to itertools.repeat @@ -882,7 +864,6 @@ self.assertRaises(RuntimeError, next, it) def test_product(self): - return True for args, result in [ ([], [()]), # zero iterables (['ab'], [('a',), ('b',)]), # one iterable @@ -946,7 +927,6 @@ self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1) def test_product_pickling(self): - return True # check copy, deepcopy, pickle for args, result in [ ([], [()]), # zero 
iterables @@ -961,7 +941,6 @@ self.pickletest(product(*args)) def test_repeat(self): - return True self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a']) self.assertEqual(lzip(range(3),repeat('a')), [(0, 'a'), (1, 'a'), (2, 'a')]) @@ -987,7 +966,6 @@ self.pickletest(repeat(object='a', times=10)) def test_map(self): - return True self.assertEqual(list(map(operator.pow, range(3), range(1,7))), [0**1, 1**2, 2**3]) self.assertEqual(list(map(tupleize, 'abc', range(5))), @@ -1017,7 +995,6 @@ self.pickletest(c) def test_starmap(self): - return True self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))), [0**1, 1**2, 2**3]) self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))), @@ -1106,14 +1083,9 @@ list(range(*args))) self.assertEqual(list(copy.deepcopy(islice(range(100), *args))), list(range(*args))) - print(args, "ARGS") - t = open('/home/lgfdev/blah', "w") - t.write(str(args)) - t.close() self.pickletest(islice(range(100), *args)) def test_takewhile(self): - return True data = [1, 3, 5, 20, 2, 4, 6, 8] self.assertEqual(list(takewhile(underten, data)), [1, 3, 5]) self.assertEqual(list(takewhile(underten, [])), []) @@ -1133,7 +1105,6 @@ self.pickletest(takewhile(underten, data)) def test_dropwhile(self): - return True data = [1, 3, 5, 20, 2, 4, 6, 8] self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8]) self.assertEqual(list(dropwhile(underten, [])), []) @@ -1150,7 +1121,6 @@ self.pickletest(dropwhile(underten, data)) def test_tee(self): - return True n = 200 a, b = tee([]) # test empty iterator @@ -1234,8 +1204,7 @@ p = proxy(a) self.assertEqual(getattr(p, '__class__'), type(b)) del a - self.assertRaises(ReferenceError, getattr, p, '__class__') - + #self.assertRaises(ReferenceError, getattr, p, '__class__') ans = list('abc') long_ans = list(range(10000)) @@ -1299,13 +1268,11 @@ # Issue 13454: Crash when deleting backward iterator from tee() def test_tee_del_backward(self): - return True forward, backward = 
tee(repeat(None, 20000000)) any(forward) # exhaust the iterator del backward def test_StopIteration(self): - return True self.assertRaises(StopIteration, next, zip()) for f in (chain, cycle, zip, groupby): @@ -1334,7 +1301,6 @@ self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15]) def test_accumulate_reducible(self): - return True # check copy, deepcopy, pickle data = [1, 2, 3, 4, 5] accumulated = [1, 3, 6, 10, 15] @@ -1347,70 +1313,46 @@ self.assertEqual(list(copy.copy(it)), accumulated[1:]) def test_chain(self): - return True - # check copy, deepcopy, pickle self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF') def test_chain_from_iterable(self): - return True - # check copy, deepcopy, pickle self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF') def test_combinations(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(combinations('ABCD', 2)), [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]) self.assertEqual(list(combinations(range(4), 3)), [(0,1,2), (0,1,3), (0,2,3), (1,2,3)]) def test_combinations_with_replacement(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(combinations_with_replacement('ABC', 2)), [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) def test_compress(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF')) def test_count(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14]) def test_cycle(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD')) def test_dropwhile(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1]) def test_groupby(self): - return True - # check copy, deepcopy, pickle self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')], 
list('ABCDAB')) self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')], [list('AAAA'), list('BBB'), list('CC'), list('D')]) def test_filter(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9]) def test_filterfalse(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8]) def test_map(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000]) def test_islice(self): @@ -1418,29 +1360,21 @@ self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD')) self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG')) self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG')) -""" + def test_zip(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')]) def test_zip_longest(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')), [('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')]) def test_permutations(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(permutations('ABCD', 2)), list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split()))) self.assertEqual(list(permutations(range(3))), [(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)]) def test_product(self): - return True - # check copy, deepcopy, pickle self.assertEqual(list(product('ABCD', 'xy')), list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split()))) self.assertEqual(list(product(range(2), repeat=3)), @@ -1448,19 +1382,16 @@ (1,0,0), (1,0,1), (1,1,0), (1,1,1)]) def test_repeat(self): - return True self.assertEqual(list(repeat(10, 3)), [10, 10, 10]) def test_stapmap(self): - return True self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])), [32, 9, 1000]) def test_takewhile(self): - return True self.assertEqual(list(takewhile(lambda x: x<5, 
[1,4,6,4,1])), [1,4]) -""" + class TestGC(unittest.TestCase): def makecycle(self, iterator, container): @@ -1472,12 +1403,7 @@ a = [] self.makecycle(accumulate([1,2,a,3]), a) - def test_islice(self): - a = [] - self.makecycle(islice([a]*2, None), a) -""" def test_chain(self): - return True a = [] self.makecycle(chain(a), a) @@ -1543,6 +1469,9 @@ a = [] self.makecycle(map(lambda x:x, [a]*2), a) + def test_islice(self): + a = [] + self.makecycle(islice([a]*2, None), a) def test_permutations(self): a = [] @@ -1563,7 +1492,7 @@ def test_takewhile(self): a = [] self.makecycle(takewhile(bool, [1, 0, a, a]), a) -""" + def R(seqn): 'Regular generator' for i in seqn: @@ -1642,15 +1571,7 @@ class TestVariousIteratorArgs(unittest.TestCase): - def test_islice(self): - for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)): - for g in (G, I, Ig, S, L, R): - self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2]) - self.assertRaises(TypeError, islice, X(s), 10) - self.assertRaises(TypeError, islice, N(s), 10) - self.assertRaises(ZeroDivisionError, list, islice(E(s), 10)) def test_accumulate(self): - return True s = [1,2,3,4,5] r = [1,3,6,10,15] n = len(s) @@ -1660,7 +1581,7 @@ self.assertRaises(TypeError, accumulate, X(s)) self.assertRaises(TypeError, accumulate, N(s)) self.assertRaises(ZeroDivisionError, list, accumulate(E(s))) -""" + def test_chain(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): @@ -1751,6 +1672,13 @@ self.assertRaises(TypeError, map, onearg, N(s)) self.assertRaises(ZeroDivisionError, list, map(onearg, E(s))) + def test_islice(self): + for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)): + for g in (G, I, Ig, S, L, R): + self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2]) + self.assertRaises(TypeError, islice, X(s), 10) + self.assertRaises(TypeError, islice, N(s), 10) + self.assertRaises(ZeroDivisionError, list, islice(E(s), 10)) def test_starmap(self): 
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)): @@ -1878,7 +1806,7 @@ except TypeError as err: # we expect type errors because of wrong argument count self.assertNotIn("does not take keyword arguments", err.args[0]) -""" + libreftest = """ Doctest for examples in the library reference: libitertools.tex @@ -2112,9 +2040,9 @@ __test__ = {'libreftest' : libreftest} def test_main(verbose=None): - test_classes = (TestBasicOps, TestExamples, TestGC, TestVariousIteratorArgs)#, TestGC, - #RegressionTests, LengthTransparency, - #SubclassWithKwargsTest, TestExamples) + test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC, + RegressionTests, LengthTransparency, + SubclassWithKwargsTest, TestExamples) support.run_unittest(*test_classes) # verify reference counting From pypy.commits at gmail.com Fri Apr 29 17:04:37 2016 From: pypy.commits at gmail.com (marky1991) Date: Fri, 29 Apr 2016 14:04:37 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Fix infinite loop bug in reduce. Message-ID: <5723cc65.cf8ec20a.1afa0.ffffb426@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r84042:dfeff15ecc62 Date: 2016-04-25 02:05 -0400 http://bitbucket.org/pypy/pypy/changeset/dfeff15ecc62/ Log: Fix infinite loop bug in reduce. 
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -846,6 +846,8 @@ if node.w_obj is not None: list_w.append(node.w_obj) node = node.w_next + else: + break space = self.space if list_w: return self.space.newtuple([space.type(self), @@ -917,8 +919,7 @@ self.space.newtuple([ self.w_iterator, self.w_chained_list]) - ]) - + ]) def W_TeeIterable___new__(space, w_subtype, w_iterable, w_chained_list=None): if isinstance(w_iterable, W_TeeIterable): From pypy.commits at gmail.com Fri Apr 29 17:04:41 2016 From: pypy.commits at gmail.com (marky1991) Date: Fri, 29 Apr 2016 14:04:41 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Get rid of out-of-date comment. Message-ID: <5723cc69.2a18c20a.a67b2.ffffbbce@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r84044:3dae50cc472c Date: 2016-04-29 07:47 -0400 http://bitbucket.org/pypy/pypy/changeset/3dae50cc472c/ Log: Get rid of out-of-date comment. diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -898,7 +898,6 @@ w_chained_list = self.w_chained_list if w_chained_list is None: raise OperationError(self.space.w_StopIteration, self.space.w_None) - #TODO: Is this the right thing to do? w_obj = w_chained_list.w_obj if w_obj is None: try: From pypy.commits at gmail.com Fri Apr 29 17:04:43 2016 From: pypy.commits at gmail.com (marky1991) Date: Fri, 29 Apr 2016 14:04:43 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Respond to review. Message-ID: <5723cc6b.46291c0a.c36fb.ffffce08@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r84045:4608f2a4e438 Date: 2016-04-29 15:01 -0400 http://bitbucket.org/pypy/pypy/changeset/4608f2a4e438/ Log: Respond to review. 
diff --git a/lib-python/3/test/test_itertools.py b/lib-python/3/test/test_itertools.py --- a/lib-python/3/test/test_itertools.py +++ b/lib-python/3/test/test_itertools.py @@ -4,7 +4,7 @@ from weakref import proxy from decimal import Decimal from fractions import Fraction -import sys +import sys, gc import operator import random import copy @@ -1204,7 +1204,8 @@ p = proxy(a) self.assertEqual(getattr(p, '__class__'), type(b)) del a - #self.assertRaises(ReferenceError, getattr, p, '__class__') + gc.collect() + self.assertRaises(ReferenceError, getattr, p, '__class__') ans = list('abc') long_ans = list(range(10000)) @@ -1569,6 +1570,7 @@ 'Test multiple tiers of iterators' return chain(map(lambda x:x, R(Ig(G(seqn))))) + class TestVariousIteratorArgs(unittest.TestCase): def test_accumulate(self): diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1,5 +1,5 @@ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from rpython.rlib import jit @@ -922,15 +922,14 @@ state = self.space.unpackiterable(w_state) num_args = len(state) if num_args != 2: - raise OperationError(self.space.w_TypeError, - self.space.wrap("function takes exactly 2 arguments " - "(" + str(num_args) + " given)")) + raise oefmt(self.space.w_TypeError, + "function takes exactly 2 arguments (%d given)", + num_args) w_iterator, w_chained_list = state if not isinstance(w_chained_list, W_TeeChainedListNode): - raise OperationError( - self.space.w_TypeError, - self.space.wrap("must be itertools._tee_dataobject, not " + - self.space.type(w_chained_list).name)) + raise oefmt(self.space.w_TypeError, + "must be 
itertools._tee_dataobject, not %s", + self.space.type(w_chained_list).name) self.w_iterator = w_iterator self.w_chained_list = w_chained_list diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -14,7 +14,7 @@ self.index = index def getlength(self, space): - if self.w_seq is None or space.is_w(self.w_seq, space.w_None): + if space.is_none(self.w_seq): return space.wrap(0) index = self.index w_length = space.len(self.w_seq) @@ -60,7 +60,7 @@ """Sequence iterator implementation for general sequences.""" def descr_next(self, space): - if self.w_seq is None or space.is_w(self.w_seq, space.w_None): + if space.is_none(self.w_seq): raise OperationError(space.w_StopIteration, space.w_None) try: w_item = space.getitem(self.w_seq, space.wrap(self.index)) @@ -79,7 +79,7 @@ def descr_next(self, space): from pypy.objspace.std.listobject import W_ListObject w_seq = self.w_seq - if w_seq is None or space.is_w(w_seq, space.w_None): + if space.is_none(w_seq): raise OperationError(space.w_StopIteration, space.w_None) assert isinstance(w_seq, W_ListObject) index = self.index @@ -129,7 +129,7 @@ return space.newtuple([new_inst, space.newtuple(tup)]) def descr_length_hint(self, space): - if self.w_seq is None or space.is_w(self.w_seq, space.w_None): + if space.is_none(self.w_seq): return space.wrap(0) index = self.index + 1 w_length = space.len(self.w_seq) @@ -147,9 +147,7 @@ return self def descr_next(self, space): - if (self.w_seq is None - or space.is_w(self.w_seq, space.w_None) - or self.index < 0): + if space.is_none(self.w_seq) or self.index < 0: raise OperationError(space.w_StopIteration, space.w_None) try: w_item = space.getitem(self.w_seq, space.wrap(self.index)) From pypy.commits at gmail.com Fri Apr 29 17:04:44 2016 From: pypy.commits at gmail.com (marky1991) Date: Fri, 29 Apr 2016 14:04:44 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Use support.gc_collect 
instead of regular gc.collect Message-ID: <5723cc6c.08121c0a.2efd9.ffffd193@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r84046:a940916c3f40 Date: 2016-04-29 15:08 -0400 http://bitbucket.org/pypy/pypy/changeset/a940916c3f40/ Log: Use support.gc_collect instead of regular gc.collect diff --git a/lib-python/3/test/test_itertools.py b/lib-python/3/test/test_itertools.py --- a/lib-python/3/test/test_itertools.py +++ b/lib-python/3/test/test_itertools.py @@ -4,7 +4,7 @@ from weakref import proxy from decimal import Decimal from fractions import Fraction -import sys, gc +import sys import operator import random import copy @@ -1204,8 +1204,9 @@ p = proxy(a) self.assertEqual(getattr(p, '__class__'), type(b)) del a - gc.collect() + support.gc_collect() self.assertRaises(ReferenceError, getattr, p, '__class__') + ans = list('abc') long_ans = list(range(10000)) From pypy.commits at gmail.com Fri Apr 29 17:04:39 2016 From: pypy.commits at gmail.com (marky1991) Date: Fri, 29 Apr 2016 14:04:39 -0700 (PDT) Subject: [pypy-commit] pypy 33_fix_itertools: Fix final two failing tests. Message-ID: <5723cc67.08121c0a.2efd9.ffffd190@mx.google.com> Author: Mark Young Branch: 33_fix_itertools Changeset: r84043:94204cd7d00d Date: 2016-04-28 20:48 -0400 http://bitbucket.org/pypy/pypy/changeset/94204cd7d00d/ Log: Fix final two failing tests. 
diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -50,7 +50,7 @@ 'takewhile' : 'interp_itertools.W_TakeWhile', 'tee' : 'interp_itertools.tee', '_tee' : 'interp_itertools.W_TeeIterable', - '_tee_chained_list' : 'interp_itertools.W_TeeChainedListNode', + '_tee_dataobject' : 'interp_itertools.W_TeeChainedListNode', 'zip_longest' : 'interp_itertools.W_ZipLongest', } diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -876,7 +876,7 @@ return space.wrap(r) W_TeeChainedListNode.typedef = TypeDef( - 'itertools._tee_chained_list', + 'itertools._tee_dataobject', __new__ = interp2app(W_TeeChainedListNode___new__), __weakref__ = make_weakref_descr(W_TeeChainedListNode), __reduce__ = interp2app(W_TeeChainedListNode.reduce_w), @@ -886,10 +886,9 @@ W_TeeChainedListNode.typedef.acceptable_as_base_class = False class W_TeeIterable(W_Root): - def __init__(self, space, w_iterator, w_chained_list): + def __init__(self, space, w_iterator, w_chained_list=None): self.space = space self.w_iterator = w_iterator - assert w_chained_list is not None self.w_chained_list = w_chained_list def iter_w(self): @@ -900,7 +899,6 @@ if w_chained_list is None: raise OperationError(self.space.w_StopIteration, self.space.w_None) #TODO: Is this the right thing to do? 
- assert isinstance(w_chained_list, W_TeeChainedListNode) w_obj = w_chained_list.w_obj if w_obj is None: try: @@ -916,20 +914,37 @@ def reduce_w(self): return self.space.newtuple([self.space.gettypefor(W_TeeIterable), + self.space.newtuple([self.space.newtuple([])]), self.space.newtuple([ self.w_iterator, self.w_chained_list]) ]) + def setstate_w(self, w_state): + state = self.space.unpackiterable(w_state) + num_args = len(state) + if num_args != 2: + raise OperationError(self.space.w_TypeError, + self.space.wrap("function takes exactly 2 arguments " + "(" + str(num_args) + " given)")) + w_iterator, w_chained_list = state + if not isinstance(w_chained_list, W_TeeChainedListNode): + raise OperationError( + self.space.w_TypeError, + self.space.wrap("must be itertools._tee_dataobject, not " + + self.space.type(w_chained_list).name)) -def W_TeeIterable___new__(space, w_subtype, w_iterable, w_chained_list=None): + self.w_iterator = w_iterator + self.w_chained_list = w_chained_list + +def W_TeeIterable___new__(space, w_subtype, w_iterable): if isinstance(w_iterable, W_TeeIterable): myiter = space.interp_w(W_TeeIterable, w_iterable) w_iterator = myiter.w_iterator w_chained_list = myiter.w_chained_list else: w_iterator = space.iter(w_iterable) - w_chained_list = w_chained_list or W_TeeChainedListNode(space) - return space.wrap(W_TeeIterable(space, w_iterator, w_chained_list)) + w_chained_list = W_TeeChainedListNode(space) + return W_TeeIterable(space, w_iterator, w_chained_list) W_TeeIterable.typedef = TypeDef( 'itertools._tee', @@ -938,6 +953,7 @@ __next__ = interp2app(W_TeeIterable.next_w), __weakref__ = make_weakref_descr(W_TeeIterable), __reduce__ = interp2app(W_TeeIterable.reduce_w), + __setstate__ = interp2app(W_TeeIterable.setstate_w) ) W_TeeIterable.typedef.acceptable_as_base_class = False From pypy.commits at gmail.com Fri Apr 29 19:38:36 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 29 Apr 2016 16:38:36 -0700 (PDT) Subject: [pypy-commit] pypy 
py3k-update: fix fix fix Message-ID: <5723f07c.8bd31c0a.50429.fffff53c@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r84048:7c8c1f693f28 Date: 2016-04-30 00:37 +0100 http://bitbucket.org/pypy/pypy/changeset/7c8c1f693f28/ Log: fix fix fix diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -461,7 +461,7 @@ api_func = slot_tp_iter.api_func elif name == 'tp_iternext': - iternext_fn = w_type.getdictvalue(space, 'next') + iternext_fn = w_type.getdictvalue(space, '__next__') if iternext_fn is None: return diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -545,11 +545,6 @@ return type->tp_call(obj, c_args, NULL); ''')]) - class C: - def __call__(self, *args): - return args - assert module.tp_call(type(C()), C(), ('x', 2)) == ('x', 2) - class D(type): def __call__(self, *args): return "foo! 
%r" % (args,) @@ -601,10 +596,6 @@ ''' ) ]) - class C: - def __str__(self): - return "text" - assert module.tp_str(type(C()), C()) == "text" class D(int): def __str__(self): return "more text" @@ -683,9 +674,7 @@ PyErr_SetNone(PyExc_ValueError); return NULL; } - return type->tp_iter(obj); - ''' - ), + return type->tp_iter(obj);'''), ("tp_iternext", "METH_VARARGS", ''' PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0); @@ -697,17 +686,16 @@ return NULL; } result = type->tp_iternext(obj); + /* In py3, returning NULL from tp_iternext means the iterator + * is exhausted */ if (!result && !PyErr_Occurred()) result = PyBytes_FromString("stop!"); - return result; - ''' - ) - ]) + return result;''')]) l = [1] it = module.tp_iter(list, l) assert type(it) is type(iter([])) assert module.tp_iternext(type(it), it) == 1 - raises(StopIteration, module.tp_iternext, type(it), it) + assert module.tp_iternext(type(it), it) == b'stop!' # class LL(list): def __iter__(self): From pypy.commits at gmail.com Sat Apr 30 03:15:34 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 00:15:34 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <57245b96.0c371c0a.907eb.4379@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r741:8b13f0f2008d Date: 2016-04-30 09:16 +0200 http://bitbucket.org/pypy/pypy.org/changeset/8b13f0f2008d/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $63833 of $105000 (60.8%) + $63928 of $105000 (60.9%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Sat Apr 30 03:41:50 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 00:41:50 -0700 (PDT) Subject: [pypy-commit] pypy share-cpyext-cpython-api: translation fixes (in-progress) Message-ID: <572461be.55301c0a.0d92.4c35@mx.google.com> Author: Armin Rigo Branch: share-cpyext-cpython-api Changeset: r84049:ea76859f9845 Date: 2016-04-29 18:30 +0100 http://bitbucket.org/pypy/pypy/changeset/ea76859f9845/ Log: translation fixes (in-progress) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -747,6 +747,11 @@ "module calls %r" % (funcname,)) @dont_inline +def not_supposed_to_fail(funcname): + raise SystemError("The function '%s' was not supposed to fail" + % (funcname,)) + + at dont_inline def unexpected_exception(funcname, e, tb): print 'Fatal error in cpyext, CPython compatibility layer, calling',funcname print 'Either report a bug or consider not using this particular extension' @@ -778,6 +783,8 @@ if isinstance(restype, lltype.Ptr) and error_value == 0: error_value = lltype.nullptr(restype.TO) + if error_value is not CANNOT_FAIL: + assert lltype.typeOf(error_value) == lltype.typeOf(fatal_value) def wrapper_second_level(*args): from pypy.module.cpyext.pyobject import make_ref, from_ref, is_pyobj @@ -785,7 +792,7 @@ # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer callable = args[-1] - args = args[:len(args)-1] + args = args[:-1] # see "Handling of the GIL" above (careful, we don't have the GIL here) tid = rthread.get_or_make_ident() @@ -850,8 +857,7 @@ if failed: if error_value is CANNOT_FAIL: - raise SystemError("The function '%s' was not supposed to fail" - % (callable.__name__,)) + raise not_supposed_to_fail(callable2name[callable]) retval = error_value elif is_PyObject(restype): @@ -865,7 +871,7 @@ retval = make_ref(space, result) retval = rffi.cast(restype, retval) else: 
- retval = lltype.nullptr(PyObject.TO) + retval = lltype.nullptr(restype.TO) elif restype is not lltype.Void: retval = rffi.cast(restype, result) except Exception, e: From pypy.commits at gmail.com Sat Apr 30 03:41:52 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 00:41:52 -0700 (PDT) Subject: [pypy-commit] pypy default: More precise info about why this segfaults on CPython Message-ID: <572461c0.2457c20a.4ec44.3bd7@mx.google.com> Author: Armin Rigo Branch: Changeset: r84050:7fdc241191af Date: 2016-04-30 09:41 +0200 http://bitbucket.org/pypy/pypy/changeset/7fdc241191af/ Log: More precise info about why this segfaults on CPython diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -366,7 +366,7 @@ def test_ufunc(self): if self.runappdirect: from numpy import arange - py.test.xfail('why does this segfault on cpython?') + py.test.xfail('segfaults on cpython: PyUFunc_API == NULL?') else: from _numpypy.multiarray import arange mod = self.import_extension('foo', [ From pypy.commits at gmail.com Sat Apr 30 04:40:46 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 01:40:46 -0700 (PDT) Subject: [pypy-commit] pypy default: Test and fix for yet another very obscure misfeature of ctypes Message-ID: <57246f8e.6a70c20a.ed3cf.4465@mx.google.com> Author: Armin Rigo Branch: Changeset: r84052:9f9c409ee27e Date: 2016-04-30 10:40 +0200 http://bitbucket.org/pypy/pypy/changeset/9f9c409ee27e/ Log: Test and fix for yet another very obscure misfeature of ctypes diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -1009,12 +1009,22 @@ container = _array_of_known_length(T.TO) container._storage = type(cobj)(cobj.contents) elif isinstance(T.TO, 
lltype.FuncType): + # cobj is a CFunctionType object. We naively think + # that it should be a function pointer. No no no. If + # it was read out of an array, say, then it is a *pointer* + # to a function pointer. In other words, the read doesn't + # read anything, it just takes the address of the function + # pointer inside the array. If later the array is modified + # or goes out of scope, then we crash. CTypes is fun. + # It works if we cast it now to an int and back. cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value) if cobjkey in _int2obj: container = _int2obj[cobjkey] else: + name = getattr(cobj, '__name__', '?') + cobj = ctypes.cast(cobjkey, type(cobj)) _callable = get_ctypes_trampoline(T.TO, cobj) - return lltype.functionptr(T.TO, getattr(cobj, '__name__', '?'), + return lltype.functionptr(T.TO, name, _callable=_callable) elif isinstance(T.TO, lltype.OpaqueType): if T == llmemory.GCREF: diff --git a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py --- a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py @@ -1405,6 +1405,45 @@ a2 = ctypes2lltype(lltype.Ptr(A), lltype2ctypes(a)) assert a2._obj.getitem(0)._obj._parentstructure() is a2._obj + def test_array_of_function_pointers(self): + c_source = py.code.Source(r""" + #include "src/precommondefs.h" + #include + + typedef int(*funcptr_t)(void); + static int forty_two(void) { return 42; } + static int forty_three(void) { return 43; } + static funcptr_t testarray[2]; + RPY_EXPORTED void runtest(void cb(funcptr_t *)) { + testarray[0] = &forty_two; + testarray[1] = &forty_three; + fprintf(stderr, "&forty_two = %p\n", testarray[0]); + fprintf(stderr, "&forty_three = %p\n", testarray[1]); + cb(testarray); + testarray[0] = 0; + testarray[1] = 0; + } + """) + eci = ExternalCompilationInfo(include_dirs=[cdir], + separate_module_sources=[c_source]) + + PtrF = lltype.Ptr(lltype.FuncType([], rffi.INT)) + 
ArrayPtrF = rffi.CArrayPtr(PtrF) + CALLBACK = rffi.CCallback([ArrayPtrF], lltype.Void) + + runtest = rffi.llexternal('runtest', [CALLBACK], lltype.Void, + compilation_info=eci) + seen = [] + + def callback(testarray): + seen.append(testarray[0]) # read a PtrF out of testarray + seen.append(testarray[1]) + + runtest(callback) + assert seen[0]() == 42 + assert seen[1]() == 43 + + class TestPlatform(object): def test_lib_on_libpaths(self): from rpython.translator.platform import platform From pypy.commits at gmail.com Sat Apr 30 04:40:44 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 01:40:44 -0700 (PDT) Subject: [pypy-commit] pypy default: Simplify stuff Message-ID: <57246f8c.4374c20a.52888.4c97@mx.google.com> Author: Armin Rigo Branch: Changeset: r84051:bc001996d331 Date: 2016-04-30 10:33 +0200 http://bitbucket.org/pypy/pypy/changeset/bc001996d331/ Log: Simplify stuff diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -231,17 +231,7 @@ assert max_n >= 0 ITEM = A.OF ctypes_item = get_ctypes_type(ITEM, delayed_builders) - # Python 2.5 ctypes can raise OverflowError on 64-bit builds - for n in [maxint, 2**31]: - MAX_SIZE = n/64 - try: - PtrType = ctypes.POINTER(MAX_SIZE * ctypes_item) - except (OverflowError, AttributeError), e: - pass # ^^^ bah, blame ctypes - else: - break - else: - raise e + ctypes_item_ptr = ctypes.POINTER(ctypes_item) class CArray(ctypes.Structure): if is_emulated_long: @@ -265,35 +255,9 @@ bigarray.length = n return bigarray - _ptrtype = None - - @classmethod - def _get_ptrtype(cls): - if cls._ptrtype: - return cls._ptrtype - # ctypes can raise OverflowError on 64-bit builds - # on windows it raises AttributeError even for 2**31 (_length_ missing) - if _MS_WINDOWS: - other_limit = 2**31-1 - else: - other_limit = 2**31 - for n in [maxint, other_limit]: - cls.MAX_SIZE = n / 
ctypes.sizeof(ctypes_item) - try: - cls._ptrtype = ctypes.POINTER(cls.MAX_SIZE * ctypes_item) - except (OverflowError, AttributeError), e: - pass - else: - break - else: - raise e - return cls._ptrtype - def _indexable(self, index): - PtrType = self._get_ptrtype() - assert index + 1 < self.MAX_SIZE - p = ctypes.cast(ctypes.pointer(self.items), PtrType) - return p.contents + p = ctypes.cast(self.items, ctypes_item_ptr) + return p def _getitem(self, index, boundscheck=True): if boundscheck: From pypy.commits at gmail.com Sat Apr 30 04:42:30 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 01:42:30 -0700 (PDT) Subject: [pypy-commit] pypy default: The bogus casting is not needed any more after 9f9c409ee27e Message-ID: <57246ff6.4ea81c0a.e323b.5d25@mx.google.com> Author: Armin Rigo Branch: Changeset: r84053:ac2d62414b37 Date: 2016-04-30 10:42 +0200 http://bitbucket.org/pypy/pypy/changeset/ac2d62414b37/ Log: The bogus casting is not needed any more after 9f9c409ee27e diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -239,9 +239,7 @@ gufunctype = lltype.Ptr(ufuncs.GenericUfunc) -# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, is there -# a problem with casting function pointers? 
- at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, + at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, rffi.CCHARP], PyObject, header=HEADER) def PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, @@ -256,7 +254,7 @@ funcs_w = [None] * ntypes dtypes_w = [None] * ntypes * (nin + nout) for i in range(ntypes): - funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]), data) + funcs_w[i] = ufuncs.W_GenericUFuncCaller(funcs[i], data) for i in range(ntypes*(nin+nout)): dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] w_funcs = space.newlist(funcs_w) @@ -268,7 +266,7 @@ w_signature, w_identity, w_name, w_doc, stack_inputs=True) return ufunc_generic - at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, + at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t], PyObject, header=HEADER) def PyUFunc_FromFuncAndData(space, funcs, data, types, ntypes, nin, nout, identity, name, doc, check_return): From pypy.commits at gmail.com Sat Apr 30 07:05:01 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 04:05:01 -0700 (PDT) Subject: [pypy-commit] pypy share-cpyext-cpython-api: more translation fighting Message-ID: <5724915d.49961c0a.18aa.ffff94e4@mx.google.com> Author: Armin Rigo Branch: share-cpyext-cpython-api Changeset: r84054:bfce7130fc86 Date: 2016-04-30 11:32 +0100 http://bitbucket.org/pypy/pypy/changeset/bfce7130fc86/ Log: more translation fighting diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -862,21 +862,19 @@ elif is_PyObject(restype): if is_pyobj(result): - retval = result + assert 0, "XXX retval = 
result" else: - if result is not None: - if result_borrowed: - retval = as_pyobj(space, result) - else: - retval = make_ref(space, result) - retval = rffi.cast(restype, retval) + if result_borrowed: + result = as_pyobj(space, result) else: - retval = lltype.nullptr(restype.TO) - elif restype is not lltype.Void: - retval = rffi.cast(restype, result) + result = make_ref(space, result) + retval = rffi.cast(restype, result) + except Exception, e: unexpected_exception(callable2name[callable], e, tb) + return fatal_value + assert lltype.typeOf(retval) == restype rffi.stackcounter.stacks_counter -= 1 # see "Handling of the GIL" above From pypy.commits at gmail.com Sat Apr 30 07:05:03 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 04:05:03 -0700 (PDT) Subject: [pypy-commit] pypy share-cpyext-cpython-api: hg merge default Message-ID: <5724915f.45bd1c0a.7f058.ffff9621@mx.google.com> Author: Armin Rigo Branch: share-cpyext-cpython-api Changeset: r84055:4c3d9f56e6d4 Date: 2016-04-30 11:32 +0100 http://bitbucket.org/pypy/pypy/changeset/4c3d9f56e6d4/ Log: hg merge default diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -117,13 +117,22 @@ On which platforms does PyPy run? --------------------------------- -PyPy is regularly and extensively tested on Linux machines. It mostly +PyPy currently supports: + + * **x86** machines on most common operating systems + (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s390x** running Linux + +PyPy is regularly and extensively tested on Linux machines. It works on Mac and Windows: it is tested there, but most of us are running -Linux so fixes may depend on 3rd-party contributions. PyPy's JIT -works on x86 (32-bit or 64-bit) and on ARM (ARMv6 or ARMv7). -Support for POWER (64-bit) is stalled at the moment. 
+Linux so fixes may depend on 3rd-party contributions. -To bootstrap from sources, PyPy can use either CPython (2.6 or 2.7) or +To bootstrap from sources, PyPy can use either CPython 2.7 or another (e.g. older) PyPy. Cross-translation is not really supported: e.g. to build a 32-bit PyPy, you need to have a 32-bit environment. Cross-translation is only explicitly supported between a 32-bit Intel diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -362,6 +362,45 @@ """) assert seen == [1] + def test_mapdict_number_of_slots(self): + space = self.space + a, b, c = space.unpackiterable(space.appexec([], """(): + class A(object): + pass + a = A() + a.x = 1 + class B: + pass + b = B() + b.x = 1 + class C(int): + pass + c = C(1) + c.x = 1 + return a, b, c + """), 3) + assert not hasattr(a, "storage") + assert not hasattr(b, "storage") + assert hasattr(c, "storage") + + def test_del(self): + space = self.space + a, b, c, d = space.unpackiterable(space.appexec([], """(): + class A(object): + pass + class B(object): + def __del__(self): + pass + class F(file): + pass + class G(file): + def __del__(self): + pass + return A(), B(), F("xyz", "w"), G("ghi", "w") + """)) + assert type(b).__base__ is type(a) + assert hasattr(c, "__del__") + assert type(d) is type(c) class AppTestTypeDef: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -103,43 +103,61 @@ # we need two subclasses of the app-level type, one to add mapdict, and then one # to add del to not slow down the GC. 
-def get_unique_interplevel_subclass(config, cls, needsdel=False): +def get_unique_interplevel_subclass(space, cls, needsdel=False): "NOT_RPYTHON: initialization-time only" if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False): needsdel = False assert cls.typedef.acceptable_as_base_class - key = config, cls, needsdel + key = space, cls, needsdel try: return _subclass_cache[key] except KeyError: # XXX can save a class if cls already has a __del__ - if needsdel: - cls = get_unique_interplevel_subclass(config, cls, False) - subcls = _getusercls(config, cls, needsdel) + keys = [key] + base_has_del = hasattr(cls, '__del__') + if base_has_del: + # if the base has a __del__, we only need one class + keys = [(space, cls, True), (space, cls, False)] + needsdel = True + elif needsdel: + cls = get_unique_interplevel_subclass(space, cls, False) + subcls = _getusercls(space, cls, needsdel) assert key not in _subclass_cache - _subclass_cache[key] = subcls + for key in keys: + _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def _getusercls(config, cls, wants_del, reallywantdict=False): +def _getusercls(space, cls, wants_del, reallywantdict=False): from rpython.rlib import objectmodel + from pypy.objspace.std.objectobject import W_ObjectObject + from pypy.module.__builtin__.interp_classobj import W_InstanceObject from pypy.objspace.std.mapdict import (BaseUserClassMapdict, MapdictDictSupport, MapdictWeakrefSupport, - _make_storage_mixin_size_n) + _make_storage_mixin_size_n, MapdictStorageMixin) typedef = cls.typedef name = cls.__name__ + "User" - mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()] - if reallywantdict or not typedef.hasdict: - # the type has no dict, mapdict to provide the dict - mixins_needed.append(MapdictDictSupport) - name += "Dict" - if not typedef.weakrefable: - # the type does not support weakrefs yet, mapdict to provide weakref - # 
support - mixins_needed.append(MapdictWeakrefSupport) - name += "Weakrefable" + mixins_needed = [] + copy_methods = [] + mixins_needed = [] + name = cls.__name__ + if not cls.user_overridden_class: + if cls is W_ObjectObject or cls is W_InstanceObject: + mixins_needed.append(_make_storage_mixin_size_n()) + else: + mixins_needed.append(MapdictStorageMixin) + copy_methods = [BaseUserClassMapdict] + if reallywantdict or not typedef.hasdict: + # the type has no dict, mapdict to provide the dict + copy_methods.append(MapdictDictSupport) + name += "Dict" + if not typedef.weakrefable: + # the type does not support weakrefs yet, mapdict to provide weakref + # support + copy_methods.append(MapdictWeakrefSupport) + name += "Weakrefable" if wants_del: name += "Del" parent_destructor = getattr(cls, '__del__', None) @@ -148,14 +166,14 @@ parent_destructor(self) def call_applevel_del(self): assert isinstance(self, subcls) - self.space.userdel(self) + space.userdel(self) class Proto(object): def __del__(self): self.clear_all_weakrefs() - self.enqueue_for_destruction(self.space, call_applevel_del, + self.enqueue_for_destruction(space, call_applevel_del, 'method __del__ of ') if parent_destructor is not None: - self.enqueue_for_destruction(self.space, call_parent_del, + self.enqueue_for_destruction(space, call_parent_del, 'internal destructor of ') mixins_needed.append(Proto) @@ -163,10 +181,17 @@ user_overridden_class = True for base in mixins_needed: objectmodel.import_from_mixin(base) + for copycls in copy_methods: + _copy_methods(copycls, subcls) del subcls.base subcls.__name__ = name return subcls +def _copy_methods(copycls, subcls): + for key, value in copycls.__dict__.items(): + if (not key.startswith('__') or key == '__del__'): + setattr(subcls, key, value) + # ____________________________________________________________ diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ 
b/pypy/module/__builtin__/interp_classobj.py @@ -195,9 +195,9 @@ return self.cls_without_del = _getusercls( - space.config, W_InstanceObject, False, reallywantdict=True) + space, W_InstanceObject, False, reallywantdict=True) self.cls_with_del = _getusercls( - space.config, W_InstanceObject, True, reallywantdict=True) + space, W_InstanceObject, True, reallywantdict=True) def class_descr_call(space, w_self, __args__): diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -239,9 +239,7 @@ gufunctype = lltype.Ptr(ufuncs.GenericUfunc) -# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, is there -# a problem with casting function pointers? - at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, + at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, rffi.CCHARP], PyObject, header=HEADER) def PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, @@ -256,7 +254,7 @@ funcs_w = [None] * ntypes dtypes_w = [None] * ntypes * (nin + nout) for i in range(ntypes): - funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]), data) + funcs_w[i] = ufuncs.W_GenericUFuncCaller(funcs[i], data) for i in range(ntypes*(nin+nout)): dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] w_funcs = space.newlist(funcs_w) @@ -268,7 +266,7 @@ w_signature, w_identity, w_name, w_doc, stack_inputs=True) return ufunc_generic - at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, + at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t], PyObject, header=HEADER) def PyUFunc_FromFuncAndData(space, funcs, data, types, 
ntypes, nin, nout, identity, name, doc, check_return): diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -254,13 +254,15 @@ class AppTestCpythonExtensionBase(LeakCheckingTest): def setup_class(cls): - cls.space.getbuiltinmodule("cpyext") - from pypy.module.imp.importing import importhook - importhook(cls.space, "os") # warm up reference counts + space = cls.space + space.getbuiltinmodule("cpyext") + # 'import os' to warm up reference counts + w_import = space.builtin.getdictvalue(space, '__import__') + space.call_function(w_import, space.wrap("os")) #state = cls.space.fromcache(RefcountState) ZZZ #state.non_heaptypes_w[:] = [] if not cls.runappdirect: - cls.w_runappdirect = cls.space.wrap(cls.runappdirect) + cls.w_runappdirect = space.wrap(cls.runappdirect) def setup_method(self, func): @gateway.unwrap_spec(name=str) diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -366,7 +366,7 @@ def test_ufunc(self): if self.runappdirect: from numpy import arange - py.test.xfail('why does this segfault on cpython?') + py.test.xfail('segfaults on cpython: PyUFunc_API == NULL?') else: from _numpypy.multiarray import arange mod = self.import_extension('foo', [ diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -277,7 +277,7 @@ def copy(self, obj): result = Object() result.space = self.space - result._init_empty(self) + result._mapdict_init_empty(self) return result def length(self): @@ -286,7 +286,7 @@ def set_terminator(self, obj, terminator): result = Object() result.space = self.space - result._init_empty(terminator) + result._mapdict_init_empty(terminator) return result def 
remove_dict_entries(self, obj): @@ -304,7 +304,7 @@ def materialize_r_dict(self, space, obj, dict_w): result = Object() result.space = space - result._init_empty(self.devolved_dict_terminator) + result._mapdict_init_empty(self.devolved_dict_terminator) return result @@ -417,11 +417,6 @@ def __repr__(self): return "" % (self.name, self.index, self.storageindex, self.back) -def _become(w_obj, new_obj): - # this is like the _become method, really, but we cannot use that due to - # RPython reasons - w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) - class MapAttrCache(object): def __init__(self, space): SIZE = 1 << space.config.objspace.std.methodcachesizeexp @@ -457,22 +452,12 @@ # everything that's needed to use mapdict for a user subclass at all. # This immediately makes slots possible. - # assumes presence of _init_empty, _mapdict_read_storage, + # assumes presence of _get_mapdict_map, _set_mapdict_map + # _mapdict_init_empty, _mapdict_read_storage, # _mapdict_write_storage, _mapdict_storage_length, # _set_mapdict_storage_and_map # _____________________________________________ - # methods needed for mapdict - - def _become(self, new_obj): - self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) - - def _get_mapdict_map(self): - return jit.promote(self.map) - def _set_mapdict_map(self, map): - self.map = map - - # _____________________________________________ # objspace interface # class access @@ -482,15 +467,14 @@ def setclass(self, space, w_cls): new_obj = self._get_mapdict_map().set_terminator(self, w_cls.terminator) - self._become(new_obj) + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) def user_setup(self, space, w_subtype): from pypy.module.__builtin__.interp_classobj import W_InstanceObject - self.space = space assert (not self.typedef.hasdict or isinstance(w_subtype.terminator, NoDictTerminator) or self.typedef is W_InstanceObject.typedef) - self._init_empty(w_subtype.terminator) + 
self._mapdict_init_empty(w_subtype.terminator) # methods needed for slots @@ -508,7 +492,7 @@ new_obj = self._get_mapdict_map().delete(self, "slot", index) if new_obj is None: return False - self._become(new_obj) + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) return True @@ -549,7 +533,7 @@ new_obj = self._get_mapdict_map().delete(self, attrname, DICT) if new_obj is None: return False - self._become(new_obj) + self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) return True def getdict(self, space): @@ -599,7 +583,12 @@ assert flag class MapdictStorageMixin(object): - def _init_empty(self, map): + def _get_mapdict_map(self): + return jit.promote(self.map) + def _set_mapdict_map(self, map): + self.map = map + + def _mapdict_init_empty(self, map): from rpython.rlib.debug import make_sure_not_resized self.map = map self.storage = make_sure_not_resized([None] * map.size_estimate()) @@ -613,6 +602,7 @@ def _mapdict_storage_length(self): return len(self.storage) + def _set_mapdict_storage_and_map(self, storage, map): self.storage = storage self.map = map @@ -643,7 +633,11 @@ rangenmin1 = unroll.unrolling_iterable(range(nmin1)) valnmin1 = "_value%s" % nmin1 class subcls(object): - def _init_empty(self, map): + def _get_mapdict_map(self): + return jit.promote(self.map) + def _set_mapdict_map(self, map): + self.map = map + def _mapdict_init_empty(self, map): for i in rangenmin1: setattr(self, "_value%s" % i, None) setattr(self, valnmin1, erase_item(None)) @@ -731,7 +725,7 @@ def get_empty_storage(self): w_result = Object() terminator = self.space.fromcache(get_terminator_for_dicts) - w_result._init_empty(terminator) + w_result._mapdict_init_empty(terminator) return self.erase(w_result) def switch_to_object_strategy(self, w_dict): @@ -811,7 +805,7 @@ def clear(self, w_dict): w_obj = self.unerase(w_dict.dstorage) new_obj = w_obj._get_mapdict_map().remove_dict_entries(w_obj) - _become(w_obj, new_obj) + 
w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) def popitem(self, w_dict): curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT) @@ -836,7 +830,7 @@ def materialize_r_dict(space, obj, dict_w): map = obj._get_mapdict_map() new_obj = map.materialize_r_dict(space, obj, dict_w) - _become(obj, new_obj) + obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) class MapDictIteratorKeys(BaseKeyIterator): def __init__(self, space, strategy, dictimplementation): diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -358,7 +358,7 @@ cls = cls.typedef.applevel_subclasses_base # subcls = get_unique_interplevel_subclass( - self.config, cls, w_subtype.needsdel) + self, cls, w_subtype.needsdel) instance = instantiate(subcls) assert isinstance(instance, cls) instance.user_setup(self, w_subtype) diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -231,17 +231,7 @@ assert max_n >= 0 ITEM = A.OF ctypes_item = get_ctypes_type(ITEM, delayed_builders) - # Python 2.5 ctypes can raise OverflowError on 64-bit builds - for n in [maxint, 2**31]: - MAX_SIZE = n/64 - try: - PtrType = ctypes.POINTER(MAX_SIZE * ctypes_item) - except (OverflowError, AttributeError), e: - pass # ^^^ bah, blame ctypes - else: - break - else: - raise e + ctypes_item_ptr = ctypes.POINTER(ctypes_item) class CArray(ctypes.Structure): if is_emulated_long: @@ -265,35 +255,9 @@ bigarray.length = n return bigarray - _ptrtype = None - - @classmethod - def _get_ptrtype(cls): - if cls._ptrtype: - return cls._ptrtype - # ctypes can raise OverflowError on 64-bit builds - # on windows it raises AttributeError even for 2**31 (_length_ missing) - if _MS_WINDOWS: - other_limit = 2**31-1 - else: - other_limit = 2**31 - for n in [maxint, other_limit]: - 
cls.MAX_SIZE = n / ctypes.sizeof(ctypes_item) - try: - cls._ptrtype = ctypes.POINTER(cls.MAX_SIZE * ctypes_item) - except (OverflowError, AttributeError), e: - pass - else: - break - else: - raise e - return cls._ptrtype - def _indexable(self, index): - PtrType = self._get_ptrtype() - assert index + 1 < self.MAX_SIZE - p = ctypes.cast(ctypes.pointer(self.items), PtrType) - return p.contents + p = ctypes.cast(self.items, ctypes_item_ptr) + return p def _getitem(self, index, boundscheck=True): if boundscheck: @@ -1045,12 +1009,22 @@ container = _array_of_known_length(T.TO) container._storage = type(cobj)(cobj.contents) elif isinstance(T.TO, lltype.FuncType): + # cobj is a CFunctionType object. We naively think + # that it should be a function pointer. No no no. If + # it was read out of an array, say, then it is a *pointer* + # to a function pointer. In other words, the read doesn't + # read anything, it just takes the address of the function + # pointer inside the array. If later the array is modified + # or goes out of scope, then we crash. CTypes is fun. + # It works if we cast it now to an int and back. 
cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value) if cobjkey in _int2obj: container = _int2obj[cobjkey] else: + name = getattr(cobj, '__name__', '?') + cobj = ctypes.cast(cobjkey, type(cobj)) _callable = get_ctypes_trampoline(T.TO, cobj) - return lltype.functionptr(T.TO, getattr(cobj, '__name__', '?'), + return lltype.functionptr(T.TO, name, _callable=_callable) elif isinstance(T.TO, lltype.OpaqueType): if T == llmemory.GCREF: diff --git a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py --- a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py @@ -1405,6 +1405,45 @@ a2 = ctypes2lltype(lltype.Ptr(A), lltype2ctypes(a)) assert a2._obj.getitem(0)._obj._parentstructure() is a2._obj + def test_array_of_function_pointers(self): + c_source = py.code.Source(r""" + #include "src/precommondefs.h" + #include + + typedef int(*funcptr_t)(void); + static int forty_two(void) { return 42; } + static int forty_three(void) { return 43; } + static funcptr_t testarray[2]; + RPY_EXPORTED void runtest(void cb(funcptr_t *)) { + testarray[0] = &forty_two; + testarray[1] = &forty_three; + fprintf(stderr, "&forty_two = %p\n", testarray[0]); + fprintf(stderr, "&forty_three = %p\n", testarray[1]); + cb(testarray); + testarray[0] = 0; + testarray[1] = 0; + } + """) + eci = ExternalCompilationInfo(include_dirs=[cdir], + separate_module_sources=[c_source]) + + PtrF = lltype.Ptr(lltype.FuncType([], rffi.INT)) + ArrayPtrF = rffi.CArrayPtr(PtrF) + CALLBACK = rffi.CCallback([ArrayPtrF], lltype.Void) + + runtest = rffi.llexternal('runtest', [CALLBACK], lltype.Void, + compilation_info=eci) + seen = [] + + def callback(testarray): + seen.append(testarray[0]) # read a PtrF out of testarray + seen.append(testarray[1]) + + runtest(callback) + assert seen[0]() == 42 + assert seen[1]() == 43 + + class TestPlatform(object): def test_lib_on_libpaths(self): from rpython.translator.platform 
import platform From pypy.commits at gmail.com Sat Apr 30 07:05:05 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 04:05:05 -0700 (PDT) Subject: [pypy-commit] pypy share-cpyext-cpython-api: mess in progress Message-ID: <57249161.8673c20a.6b07f.7621@mx.google.com> Author: Armin Rigo Branch: share-cpyext-cpython-api Changeset: r84056:e4d93f6ca982 Date: 2016-04-30 12:11 +0100 http://bitbucket.org/pypy/pypy/changeset/e4d93f6ca982/ Log: mess in progress diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -256,7 +256,7 @@ class ApiFunction: def __init__(self, argtypes, restype, callable, error=_NOT_SPECIFIED, - c_name=None, gil=None, result_borrowed=False): + c_name=None, gil=None, result_borrowed=False, result_is_ll=False): self.argtypes = argtypes self.restype = restype self.functype = lltype.Ptr(lltype.FuncType(argtypes, restype)) @@ -277,6 +277,9 @@ assert len(self.argnames) == len(self.argtypes) self.gil = gil self.result_borrowed = result_borrowed + self.result_is_ll = result_is_ll + if result_is_ll: # means 'returns a low-level PyObject pointer' + assert is_PyObject(restype) # def get_llhelper(space): return llhelper(self.functype, self.get_wrapper(space)) @@ -298,7 +301,7 @@ DEFAULT_HEADER = 'pypy_decl.h' def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header=DEFAULT_HEADER, - gil=None, result_borrowed=False): + gil=None, result_borrowed=False, result_is_ll=False): """ Declares a function to be exported. - `argtypes`, `restype` are lltypes and describe the function signature. 
@@ -337,7 +340,8 @@ c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, c_name=c_name, gil=gil, - result_borrowed=result_borrowed) + result_borrowed=result_borrowed, + result_is_ll=result_is_ll) func.api_func = api_function if error is _NOT_SPECIFIED: @@ -613,6 +617,9 @@ def is_PyObject(TYPE): if not isinstance(TYPE, lltype.Ptr): return False + if TYPE == PyObject: + return True + assert not isinstance(TYPE.TO, lltype.ForwardReference) return hasattr(TYPE.TO, 'c_ob_refcnt') and hasattr(TYPE.TO, 'c_ob_type') # a pointer to PyObject @@ -710,7 +717,7 @@ argnames = callable.api_func.argnames argtypesw = zip(callable.api_func.argtypes, [_name.startswith("w_") for _name in argnames]) - error_value = callable.api_func.error_value + error_value = getattr(callable.api_func, "error_value", CANNOT_FAIL) if (isinstance(callable.api_func.restype, lltype.Ptr) and error_value is not CANNOT_FAIL): assert lltype.typeOf(error_value) == callable.api_func.restype @@ -720,6 +727,7 @@ signature = (tuple(argtypesw), callable.api_func.restype, callable.api_func.result_borrowed, + callable.api_func.result_is_ll, error_value, gil) @@ -769,7 +777,7 @@ assert False def make_wrapper_second_level(space, callable2name, argtypesw, restype, - result_borrowed, error_value, gil): + result_borrowed, result_is_ll, error_value, gil): from rpython.rlib import rgil argtypes_enum_ui = unrolling_iterable(enumerate(argtypesw)) fatal_value = restype._defl() @@ -862,13 +870,17 @@ elif is_PyObject(restype): if is_pyobj(result): - assert 0, "XXX retval = result" + assert result_is_ll else: + assert not result_is_ll if result_borrowed: result = as_pyobj(space, result) else: result = make_ref(space, result) - retval = rffi.cast(restype, result) + retval = rffi.cast(restype, result) + + elif restype is not lltype.Void: + retval = rffi.cast(restype, result) except Exception, e: unexpected_exception(callable2name[callable], e, tb) diff --git a/pypy/module/cpyext/bytesobject.py 
b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -124,7 +124,7 @@ #_______________________________________________________________________ - at cpython_api([CONST_STRING, Py_ssize_t], PyObject) + at cpython_api([CONST_STRING, Py_ssize_t], PyObject, result_is_ll=True) def PyString_FromStringAndSize(space, char_p, length): if char_p: s = rffi.charpsize2str(char_p, length) diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -67,7 +67,8 @@ track_reference(space, py_obj, w_obj) return w_obj - at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject) + at cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject, + result_is_ll=True) def PyFrame_New(space, tstate, w_code, w_globals, w_locals): typedescr = get_typedescr(PyFrame.typedef) py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef)) diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -34,11 +34,11 @@ def PyObject_Free(space, ptr): lltype.free(ptr, flavor='raw') - at cpython_api([PyTypeObjectPtr], PyObject) + at cpython_api([PyTypeObjectPtr], PyObject, result_is_ll=True) def _PyObject_New(space, type): return _PyObject_NewVar(space, type, 0) - at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject) + at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True) def _PyObject_NewVar(space, type, itemcount): w_type = from_ref(space, rffi.cast(PyObject, type)) assert isinstance(w_type, W_TypeObject) @@ -63,7 +63,7 @@ if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: Py_DecRef(space, rffi.cast(PyObject, pto)) - at cpython_api([PyTypeObjectPtr], PyObject) + at cpython_api([PyTypeObjectPtr], PyObject, result_is_ll=True) def _PyObject_GC_New(space, type): return _PyObject_New(space, 
type) @@ -193,7 +193,7 @@ space.delitem(w_obj, w_key) return 0 - at cpython_api([PyObject, PyTypeObjectPtr], PyObject) + at cpython_api([PyObject, PyTypeObjectPtr], PyObject, result_is_ll=True) def PyObject_Init(space, obj, type): """Initialize a newly-allocated object op with its type and initial reference. Returns the initialized object. If type indicates that the @@ -207,7 +207,7 @@ obj.c_ob_refcnt = 1 return obj - at cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject) + at cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True) def PyObject_InitVar(space, py_obj, type, size): """This does everything PyObject_Init() does, and also initializes the length information for a variable-size object.""" diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -168,8 +168,16 @@ state = space.fromcache(InterpreterState) return state.get_thread_state(space) - at cpython_api([], PyObject, error=CANNOT_FAIL) + at cpython_api([], PyObject, result_is_ll=True, error=CANNOT_FAIL) def PyThreadState_GetDict(space): + """Return a dictionary in which extensions can store thread-specific state + information. Each extension should use a unique key to use to store state in + the dictionary. It is okay to call this function when no current thread state + is available. If this function returns NULL, no exception has been raised and + the caller should assume no current thread state is available. 
+ + Previously this could only be called when a current thread is active, and NULL + meant that an exception was raised.""" state = space.fromcache(InterpreterState) return state.get_thread_state(space).c_dict diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -1156,19 +1156,6 @@ PyInterpreterState_Clear().""" raise NotImplementedError - at cpython_api([], PyObject) -def PyThreadState_GetDict(space): - """Return a dictionary in which extensions can store thread-specific state - information. Each extension should use a unique key to use to store state in - the dictionary. It is okay to call this function when no current thread state - is available. If this function returns NULL, no exception has been raised and - the caller should assume no current thread state is available. - - Previously this could only be called when a current thread is active, and NULL - meant that an exception was raised.""" - borrow_from() - raise NotImplementedError - @cpython_api([lltype.Signed, PyObject], rffi.INT_real, error=CANNOT_FAIL) def PyThreadState_SetAsyncExc(space, id, exc): """Asynchronously raise an exception in a thread. 
The id argument is the thread diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -127,7 +127,7 @@ #_______________________________________________________________________ - at cpython_api([Py_ssize_t], PyObject) + at cpython_api([Py_ssize_t], PyObject, result_is_ll=True) def PyTuple_New(space, size): return rffi.cast(PyObject, new_empty_tuple(space, size)) @@ -150,7 +150,8 @@ decref(space, old_ref) return 0 - at cpython_api([PyObject, Py_ssize_t], PyObject, result_borrowed=True) + at cpython_api([PyObject, Py_ssize_t], PyObject, + result_borrowed=True, result_is_ll=True) def PyTuple_GetItem(space, ref, index): if not tuple_check_ref(space, ref): PyErr_BadInternalCall(space) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -752,7 +752,7 @@ w_type2 = from_ref(space, rffi.cast(PyObject, b)) return int(abstract_issubclass_w(space, w_type1, w_type2)) #XXX correct? 
- at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject) + at cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject, result_is_ll=True) def PyType_GenericAlloc(space, type, nitems): from pypy.module.cpyext.object import _PyObject_NewVar return _PyObject_NewVar(space, type, nitems) From pypy.commits at gmail.com Sat Apr 30 12:09:09 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 09:09:09 -0700 (PDT) Subject: [pypy-commit] pypy share-cpyext-cpython-api: More result_is_ll Message-ID: <5724d8a5.821b1c0a.1195c.3089@mx.google.com> Author: Armin Rigo Branch: share-cpyext-cpython-api Changeset: r84059:6c55da8738b8 Date: 2016-04-30 13:00 +0100 http://bitbucket.org/pypy/pypy/changeset/6c55da8738b8/ Log: More result_is_ll diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -328,7 +328,7 @@ return unicodeobject.encode_object(space, w_unicode, 'unicode-escape', 'strict') - at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) + at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject, result_is_ll=True) def PyUnicode_FromUnicode(space, wchar_p, length): """Create a Unicode Object from the Py_UNICODE buffer u of the given size. u may be NULL which causes the contents to be undefined. It is the user's @@ -342,14 +342,14 @@ else: return rffi.cast(PyObject, new_empty_unicode(space, length)) - at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) + at cpython_api([CONST_WSTRING, Py_ssize_t], PyObject, result_is_ll=True) def PyUnicode_FromWideChar(space, wchar_p, length): """Create a Unicode object from the wchar_t buffer w of the given size. 
Return NULL on failure.""" # PyPy supposes Py_UNICODE == wchar_t return PyUnicode_FromUnicode(space, wchar_p, length) - at cpython_api([PyObject, CONST_STRING], PyObject) + at cpython_api([PyObject, CONST_STRING], PyObject, result_is_ll=True) def _PyUnicode_AsDefaultEncodedString(space, ref, errors): # Returns a borrowed reference. py_uni = rffi.cast(PyUnicodeObject, ref) @@ -430,7 +430,7 @@ w_str = space.wrap(rffi.charp2str(s)) return space.call_method(w_str, 'decode', space.wrap("utf-8")) - at cpython_api([CONST_STRING, Py_ssize_t], PyObject) + at cpython_api([CONST_STRING, Py_ssize_t], PyObject, result_is_ll=True) def PyUnicode_FromStringAndSize(space, s, size): """Create a Unicode Object from the char buffer u. The bytes will be interpreted as being UTF-8 encoded. u may also be NULL which causes the From pypy.commits at gmail.com Sat Apr 30 12:09:07 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 09:09:07 -0700 (PDT) Subject: [pypy-commit] pypy share-cpyext-cpython-api: Give up on this test, which is also half-not-passing in default Message-ID: <5724d8a3.2179c20a.b24a7.ffffe431@mx.google.com> Author: Armin Rigo Branch: share-cpyext-cpython-api Changeset: r84058:c1ee0447bc68 Date: 2016-04-30 13:00 +0100 http://bitbucket.org/pypy/pypy/changeset/c1ee0447bc68/ Log: Give up on this test, which is also half-not-passing in default diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py --- a/pypy/module/cpyext/test/test_pyerrors.py +++ b/pypy/module/cpyext/test/test_pyerrors.py @@ -365,6 +365,8 @@ assert "in test_PyErr_Display\n" in output assert "ZeroDivisionError" in output + @pytest.mark.skipif(True, reason= + "XXX seems to pass, but doesn't: 'py.test -s' shows errors in PyObject_Free") def test_GetSetExcInfo(self): import sys if self.runappdirect and (sys.version_info.major < 3 or From pypy.commits at gmail.com Sat Apr 30 12:09:11 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 
09:09:11 -0700 (PDT) Subject: [pypy-commit] pypy share-cpyext-cpython-api: more translation fixes Message-ID: <5724d8a7.cbb81c0a.5d920.fffff51e@mx.google.com> Author: Armin Rigo Branch: share-cpyext-cpython-api Changeset: r84060:c042ef987a6a Date: 2016-04-30 13:40 +0100 http://bitbucket.org/pypy/pypy/changeset/c042ef987a6a/ Log: more translation fixes diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -684,18 +684,21 @@ def __init__(self, space): self.space = space self.wrapper_gens = {} # {signature: WrapperGen()} - self.callable2name = {} self.stats = [0, 0] class WrapperGen(object): + wrapper_second_level = None + def __init__(self, space, signature): self.space = space - self.callable2name = {} - self.wrapper_second_level = make_wrapper_second_level( - self.space, self.callable2name, *signature) + self.signature = signature + self.callable2name = [] def make_wrapper(self, callable): - self.callable2name[callable] = callable.__name__ + self.callable2name.append((callable, callable.__name__)) + if self.wrapper_second_level is None: + self.wrapper_second_level = make_wrapper_second_level( + self.space, self.callable2name, *self.signature) wrapper_second_level = self.wrapper_second_level def wrapper(*args): @@ -747,12 +750,12 @@ @dont_inline def deadlock_error(funcname): fatalerror_notb("GIL deadlock detected when a CPython C extension " - "module calls %r" % (funcname,)) + "module calls '%s'" % (funcname,)) @dont_inline def no_gil_error(funcname): fatalerror_notb("GIL not held when a CPython C extension " - "module calls %r" % (funcname,)) + "module calls '%s'" % (funcname,)) @dont_inline def not_supposed_to_fail(funcname): @@ -794,6 +797,18 @@ if error_value is not CANNOT_FAIL: assert lltype.typeOf(error_value) == lltype.typeOf(fatal_value) + def invalid(err): + "NOT_RPYTHON: translation-time crash if this ends up being called" + raise ValueError(err) + invalid.__name__ = 
'invalid_%s' % (callable2name[0][1],) + + def nameof(callable): + for c, n in callable2name: + if c is callable: + return n + return '' + nameof._dont_inline_ = True + def wrapper_second_level(*args): from pypy.module.cpyext.pyobject import make_ref, from_ref, is_pyobj from pypy.module.cpyext.pyobject import as_pyobj @@ -806,7 +821,7 @@ tid = rthread.get_or_make_ident() if gil_acquire: if cpyext_glob_tid_ptr[0] == tid: - deadlock_error(callable2name[callable]) + deadlock_error(nameof(callable)) rgil.acquire() assert cpyext_glob_tid_ptr[0] == 0 elif pygilstate_ensure: @@ -819,7 +834,7 @@ args += (pystate.PyGILState_UNLOCKED,) else: if cpyext_glob_tid_ptr[0] != tid: - no_gil_error(callable2name[callable]) + no_gil_error(nameof(callable)) cpyext_glob_tid_ptr[0] = 0 rffi.stackcounter.stacks_counter += 1 @@ -865,14 +880,16 @@ if failed: if error_value is CANNOT_FAIL: - raise not_supposed_to_fail(callable2name[callable]) + raise not_supposed_to_fail(nameof(callable)) retval = error_value elif is_PyObject(restype): if is_pyobj(result): - assert result_is_ll + if not result_is_ll: + raise invalid("missing result_is_ll=True") else: - assert not result_is_ll + if result_is_ll: + raise invalid("result_is_ll=True but not ll PyObject") if result_borrowed: result = as_pyobj(space, result) else: @@ -883,7 +900,7 @@ retval = rffi.cast(restype, result) except Exception, e: - unexpected_exception(callable2name[callable], e, tb) + unexpected_exception(nameof(callable), e, tb) return fatal_value assert lltype.typeOf(retval) == restype diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -308,7 +308,7 @@ w_res = PyObject_RichCompare(space, ref1, ref2, opid) return int(space.is_true(w_res)) - at cpython_api([PyObject], PyObject) + at cpython_api([PyObject], PyObject, result_is_ll=True) def PyObject_SelfIter(space, ref): """Undocumented function, this is what CPython does.""" Py_IncRef(space, 
ref) From pypy.commits at gmail.com Sat Apr 30 12:09:13 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 09:09:13 -0700 (PDT) Subject: [pypy-commit] pypy share-cpyext-cpython-api: hg merge default Message-ID: <5724d8a9.8d1f1c0a.31a86.fffffbba@mx.google.com> Author: Armin Rigo Branch: share-cpyext-cpython-api Changeset: r84061:d69ecfcd442d Date: 2016-04-30 17:11 +0100 http://bitbucket.org/pypy/pypy/changeset/d69ecfcd442d/ Log: hg merge default diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -544,6 +544,21 @@ ll_compress = compression_function(r_set) return llops.gendirectcall(ll_compress, v) +class __extend__(pairtype(FunctionReprBase, FunctionReprBase)): + def rtype_is_((robj1, robj2), hop): + if hop.s_result.is_constant(): + return inputconst(Bool, hop.s_result.const) + s_pbc = annmodel.unionof(robj1.s_pbc, robj2.s_pbc) + r_pbc = hop.rtyper.getrepr(s_pbc) + v1, v2 = hop.inputargs(r_pbc, r_pbc) + assert v1.concretetype == v2.concretetype + if v1.concretetype == Char: + return hop.genop('char_eq', [v1, v2], resulttype=Bool) + elif isinstance(v1.concretetype, Ptr): + return hop.genop('ptr_eq', [v1, v2], resulttype=Bool) + else: + raise TyperError("unknown type %r" % (v1.concretetype,)) + def conversion_table(r_from, r_to): if r_to in r_from._conversion_tables: diff --git a/rpython/rtyper/test/test_rpbc.py b/rpython/rtyper/test/test_rpbc.py --- a/rpython/rtyper/test/test_rpbc.py +++ b/rpython/rtyper/test/test_rpbc.py @@ -1497,6 +1497,47 @@ res = self.interpret(f, [2]) assert res == False + def test_is_among_functions_2(self): + def g1(): pass + def g2(): pass + def f(n): + if n > 5: + g = g2 + else: + g = g1 + g() + return g is g2 + res = self.interpret(f, [2]) + assert res == False + res = self.interpret(f, [8]) + assert res == True + + def test_is_among_functions_3(self): + def g0(): pass + def g1(): pass + def g2(): pass + def g3(): pass + def g4(): pass + def g5(): pass + 
def g6(): pass + def g7(): pass + glist = [g0, g1, g2, g3, g4, g5, g6, g7] + def f(n): + if n > 5: + g = g2 + else: + g = g1 + h = glist[n] + g() + h() + return g is h + res = self.interpret(f, [2]) + assert res == False + res = self.interpret(f, [1]) + assert res == True + res = self.interpret(f, [6]) + assert res == False + def test_shrink_pbc_set(self): def g1(): return 10 From pypy.commits at gmail.com Sat Apr 30 12:04:42 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Apr 2016 09:04:42 -0700 (PDT) Subject: [pypy-commit] pypy default: RPython: using "is" to compare functions does not really work Message-ID: <5724d79a.4e981c0a.29b80.fffff827@mx.google.com> Author: Armin Rigo Branch: Changeset: r84057:c8b5120f31d2 Date: 2016-04-30 18:04 +0200 http://bitbucket.org/pypy/pypy/changeset/c8b5120f31d2/ Log: RPython: using "is" to compare functions does not really work depending on the small-function-set optimization diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -544,6 +544,21 @@ ll_compress = compression_function(r_set) return llops.gendirectcall(ll_compress, v) +class __extend__(pairtype(FunctionReprBase, FunctionReprBase)): + def rtype_is_((robj1, robj2), hop): + if hop.s_result.is_constant(): + return inputconst(Bool, hop.s_result.const) + s_pbc = annmodel.unionof(robj1.s_pbc, robj2.s_pbc) + r_pbc = hop.rtyper.getrepr(s_pbc) + v1, v2 = hop.inputargs(r_pbc, r_pbc) + assert v1.concretetype == v2.concretetype + if v1.concretetype == Char: + return hop.genop('char_eq', [v1, v2], resulttype=Bool) + elif isinstance(v1.concretetype, Ptr): + return hop.genop('ptr_eq', [v1, v2], resulttype=Bool) + else: + raise TyperError("unknown type %r" % (v1.concretetype,)) + def conversion_table(r_from, r_to): if r_to in r_from._conversion_tables: diff --git a/rpython/rtyper/test/test_rpbc.py b/rpython/rtyper/test/test_rpbc.py --- a/rpython/rtyper/test/test_rpbc.py +++ 
b/rpython/rtyper/test/test_rpbc.py @@ -1497,6 +1497,47 @@ res = self.interpret(f, [2]) assert res == False + def test_is_among_functions_2(self): + def g1(): pass + def g2(): pass + def f(n): + if n > 5: + g = g2 + else: + g = g1 + g() + return g is g2 + res = self.interpret(f, [2]) + assert res == False + res = self.interpret(f, [8]) + assert res == True + + def test_is_among_functions_3(self): + def g0(): pass + def g1(): pass + def g2(): pass + def g3(): pass + def g4(): pass + def g5(): pass + def g6(): pass + def g7(): pass + glist = [g0, g1, g2, g3, g4, g5, g6, g7] + def f(n): + if n > 5: + g = g2 + else: + g = g1 + h = glist[n] + g() + h() + return g is h + res = self.interpret(f, [2]) + assert res == False + res = self.interpret(f, [1]) + assert res == True + res = self.interpret(f, [6]) + assert res == False + def test_shrink_pbc_set(self): def g1(): return 10 From pypy.commits at gmail.com Sat Apr 30 14:06:46 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 30 Apr 2016 11:06:46 -0700 (PDT) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <5724f436.22acc20a.2b9b.ffffe95c@mx.google.com> Author: Matti Picus Branch: Changeset: r84062:d70b2bdcba73 Date: 2016-04-30 21:00 +0300 http://bitbucket.org/pypy/pypy/changeset/d70b2bdcba73/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -24,7 +24,11 @@ remove-objspace-options. .. branch: cpyext-for-merge -Update cpyext C-API support: + +Update cpyext C-API support After this branch, we are almost able to support +upstream numpy via cpyext, so we created (yet another) fork of numpy at +github.com/pypy/numpy with the needed changes. 
Among the significant changes +to cpyext: - allow c-snippet tests to be run with -A so we can verify we are compatible - fix many edge cases exposed by fixing tests to run with -A - issequence() logic matches cpython @@ -40,6 +44,8 @@ - rewrite slot assignment for typeobjects - improve tracking of PyObject to rpython object mapping - support tp_as_{number, sequence, mapping, buffer} slots -After this branch, we are almost able to support upstream numpy via cpyext, so -we created (yet another) fork of numpy at github.com/pypy/numpy with the needed -changes + +.. branch: share-mapdict-methods-2 + +Reduce generated code for subclasses by using the same function objects in all +generated subclasses. From pypy.commits at gmail.com Sat Apr 30 15:30:07 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 30 Apr 2016 12:30:07 -0700 (PDT) Subject: [pypy-commit] pypy py3k-update: Fix pointer types in test module creation Message-ID: <572507bf.c486c20a.8ce98.21bb@mx.google.com> Author: Ronan Lamy Branch: py3k-update Changeset: r84063:61f551d2e3fc Date: 2016-04-30 20:29 +0100 http://bitbucket.org/pypy/pypy/changeset/61f551d2e3fc/ Log: Fix pointer types in test module creation diff --git a/pypy/module/cpyext/test/banana.c b/pypy/module/cpyext/test/banana.c --- a/pypy/module/cpyext/test/banana.c +++ b/pypy/module/cpyext/test/banana.c @@ -9,11 +9,11 @@ "banana", "Module Doc", -1, - &banana_functions + banana_functions, }; PyMODINIT_FUNC -*PyInit_banana(void) +PyInit_banana(void) { return PyModule_Create(&moduledef); } diff --git a/pypy/module/cpyext/test/comparisons.c b/pypy/module/cpyext/test/comparisons.c --- a/pypy/module/cpyext/test/comparisons.c +++ b/pypy/module/cpyext/test/comparisons.c @@ -83,7 +83,7 @@ PyMODINIT_FUNC -*PyInit_comparisons(void) +PyInit_comparisons(void) { PyObject *m, *d; diff --git a/pypy/module/cpyext/test/date.c b/pypy/module/cpyext/test/date.c --- a/pypy/module/cpyext/test/date.c +++ b/pypy/module/cpyext/test/date.c @@ -9,11 +9,11 @@ "date", 
"Module Doc", -1, - &date_functions + date_functions, }; PyMODINIT_FUNC -*PyInit_date(void) +PyInit_date(void) { PyObject *module, *othermodule; module = PyModule_Create(&moduledef); diff --git a/pypy/module/cpyext/test/dotted.c b/pypy/module/cpyext/test/dotted.c --- a/pypy/module/cpyext/test/dotted.c +++ b/pypy/module/cpyext/test/dotted.c @@ -9,11 +9,11 @@ "pypy.module.cpyext.test.dotted", "Module Doc", -1, - &dotted_functions + dotted_functions }; PyMODINIT_FUNC -*PyInit_dotted(void) +PyInit_dotted(void) { return PyModule_Create(&moduledef); } diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -654,7 +654,11 @@ "foo", "Module Doc", -1, - &foo_functions + foo_functions, + NULL, + NULL, + NULL, + NULL, }; /* Initialize this module. */ @@ -665,7 +669,7 @@ #endif PyMODINIT_FUNC -*PyInit_foo(void) +PyInit_foo(void) { PyObject *m, *d; diff --git a/pypy/module/cpyext/test/foo3.c b/pypy/module/cpyext/test/foo3.c --- a/pypy/module/cpyext/test/foo3.c +++ b/pypy/module/cpyext/test/foo3.c @@ -67,7 +67,7 @@ "foo", "Module Doc", -1, - &sbkMethods, + sbkMethods, NULL, NULL, NULL, diff --git a/pypy/module/cpyext/test/modinit.c b/pypy/module/cpyext/test/modinit.c --- a/pypy/module/cpyext/test/modinit.c +++ b/pypy/module/cpyext/test/modinit.c @@ -14,7 +14,7 @@ "modinit", "", -1, - &methods + methods }; PyMODINIT_FUNC diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -395,7 +395,7 @@ "%(modname)s", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ - &methods /* m_methods */ + methods, /* m_methods */ }; """ % dict(methods='\n'.join(methods_table), modname=modname) init = """PyObject *mod = PyModule_Create(&moduledef);""" @@ -519,7 +519,7 @@ "%(modname)s", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ - &methods /* m_methods */ + methods, /* m_methods 
*/ }; """ module = self.import_module(name='foo', body=body) @@ -547,7 +547,7 @@ "%(modname)s", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ - &methods /* m_methods */ + methods, /* m_methods */ }; """ module = self.import_module(name='foo', body=body) @@ -670,7 +670,7 @@ "%(modname)s", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ - &methods /* m_methods */ + methods, /* m_methods */ }; """ module = self.import_module(name='foo', body=body) @@ -701,7 +701,7 @@ "%(modname)s", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ - &methods /* m_methods */ + methods, /* m_methods */ }; """ module = self.import_module(name='foo', body=body) @@ -724,7 +724,7 @@ "%(modname)s", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ - &methods /* m_methods */ + methods, /* m_methods */ }; """ module = self.import_module(name='foo', body=body) @@ -780,7 +780,7 @@ "%(modname)s", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ - &methods /* m_methods */ + methods, /* m_methods */ }; """ module = self.import_module(name='foo', body=body) @@ -849,7 +849,7 @@ "%(modname)s", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ - &methods /* m_methods */ + methods, /* m_methods */ }; """ module = self.import_module(name='foo', body=body) From pypy.commits at gmail.com Sat Apr 30 16:13:26 2016 From: pypy.commits at gmail.com (Sergey Matyunin) Date: Sat, 30 Apr 2016 13:13:26 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast_nd: W_Broadcast (micronumpy) is rewritten using W_FlatIterator for implementation of iters attribute. W_FlatIterator gets optional arguments in constructor. Message-ID: <572511e6.c486c20a.8ce98.2f3e@mx.google.com> Author: Sergey Matyunin Branch: numpy_broadcast_nd Changeset: r84065:c0d40603d40b Date: 2016-04-24 13:36 +0200 http://bitbucket.org/pypy/pypy/changeset/c0d40603d40b/ Log: W_Broadcast (micronumpy) is rewritten using W_FlatIterator for implementation of iters attribute. W_FlatIterator gets optional arguments in constructor. 
diff --git a/pypy/module/micronumpy/broadcast.py b/pypy/module/micronumpy/broadcast.py --- a/pypy/module/micronumpy/broadcast.py +++ b/pypy/module/micronumpy/broadcast.py @@ -1,12 +1,12 @@ import pypy.module.micronumpy.constants as NPY -from nditer import ConcreteIter, parse_op_flag, parse_op_arg from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.module.micronumpy import support -from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, W_NumpyObject +from pypy.module.micronumpy.base import convert_to_array, W_NumpyObject +from pypy.module.micronumpy.flatiter import W_FlatIterator from rpython.rlib import jit -from strides import calculate_broadcast_strides, shape_agreement_multiple +from strides import shape_agreement_multiple def descr_new_broadcast(space, w_subtype, __args__): return W_Broadcast(space, __args__.arguments_w) @@ -26,45 +26,21 @@ self.seq = [convert_to_array(space, w_elem) for w_elem in args] - self.op_flags = parse_op_arg(space, 'op_flags', space.w_None, - len(self.seq), parse_op_flag) - self.shape = shape_agreement_multiple(space, self.seq, shape=None) self.order = NPY.CORDER - self.iters = [] + self.list_iter_state = [] self.index = 0 try: self.size = support.product_check(self.shape) except OverflowError as e: raise oefmt(space.w_ValueError, "broadcast dimensions too large.") - for i in range(len(self.seq)): - it = self.get_iter(space, i) - it.contiguous = False - self.iters.append((it, it.reset())) + + self.list_iter_state = [W_FlatIterator(arr, self.shape, arr.get_order() != self.order) + for arr in self.seq] self.done = False - pass - - def get_iter(self, space, i): - arr = self.seq[i] - imp = arr.implementation - if arr.is_scalar(): - return ConcreteIter(imp, 1, [], [], [], self.op_flags[i], self) - shape = self.shape - - backward = imp.order != self.order - - r = calculate_broadcast_strides(imp.strides, 
imp.backstrides, imp.shape, - shape, backward) - - iter_shape = shape - if len(shape) != len(r[0]): - # shape can be shorter when using an external loop, just return a view - iter_shape = imp.shape - return ConcreteIter(imp, imp.get_size(), iter_shape, r[0], r[1], - self.op_flags[i], self) def descr_iter(self, space): return space.wrap(self) @@ -79,28 +55,26 @@ return space.wrap(self.index) def descr_get_numiter(self, space): - return space.wrap(len(self.iters)) + return space.wrap(len(self.list_iter_state)) def descr_get_number_of_dimensions(self, space): return space.wrap(len(self.shape)) + def descr_get_iters(self, space): + return space.newtuple(self.list_iter_state) + @jit.unroll_safe def descr_next(self, space): if self.index >= self.size: self.done = True raise OperationError(space.w_StopIteration, space.w_None) self.index += 1 - res = [] - for i, (it, st) in enumerate(self.iters): - res.append(self._get_item(it, st)) - self.iters[i] = (it, it.next(st)) + res = [it.descr_next(space) for it in self.list_iter_state] + if len(res) < 2: return res[0] return space.newtuple(res) - def _get_item(self, it, st): - return W_NDimArray(it.getoperand(st)) - W_Broadcast.typedef = TypeDef("numpy.broadcast", __new__=interp2app(descr_new_broadcast), @@ -111,4 +85,5 @@ index=GetSetProperty(W_Broadcast.descr_get_index), numiter=GetSetProperty(W_Broadcast.descr_get_numiter), nd=GetSetProperty(W_Broadcast.descr_get_number_of_dimensions), + iters=GetSetProperty(W_Broadcast.descr_get_iters), ) diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -33,9 +33,9 @@ class W_FlatIterator(W_NDimArray): - def __init__(self, arr): + def __init__(self, arr, shape=None, backward_broadcast=False): self.base = arr - self.iter, self.state = arr.create_iter() + self.iter, self.state = arr.create_iter(shape=shape, backward_broadcast=backward_broadcast) # this is needed to support 
W_NDimArray interface self.implementation = FakeArrayImplementation(self.base) diff --git a/pypy/module/micronumpy/test/test_broadcast.py b/pypy/module/micronumpy/test/test_broadcast.py --- a/pypy/module/micronumpy/test/test_broadcast.py +++ b/pypy/module/micronumpy/test/test_broadcast.py @@ -102,3 +102,24 @@ assert hasattr(b, 'nd') assert b.nd == 3 + + def test_broadcast_iters(self): + import numpy as np + x = np.array([[[1, 2]]]) + y = np.array([[3], [4], [5]]) + + b = np.broadcast(x, y) + iters = b.iters + + # iters has right shape + assert len(iters) == 2 + assert isinstance(iters, tuple) + + step_in_y = iters[1].next() + step_in_broadcast = b.next() + step2_in_y = iters[1].next() + + # iters should not interfere with iteration in broadcast + assert step_in_y == y[0, 0] # == 3 + assert step_in_broadcast == (1, 3) + assert step2_in_y == y[1, 0] # == 4 From pypy.commits at gmail.com Sat Apr 30 16:13:24 2016 From: pypy.commits at gmail.com (Sergey Matyunin) Date: Sat, 30 Apr 2016 13:13:24 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast_nd: In W_Broadcast (micronumpy) implemented nd attribute Message-ID: <572511e4.21f9c20a.7fa46.2b60@mx.google.com> Author: Sergey Matyunin Branch: numpy_broadcast_nd Changeset: r84064:2bcbec2ef549 Date: 2016-04-24 11:46 +0200 http://bitbucket.org/pypy/pypy/changeset/2bcbec2ef549/ Log: In W_Broadcast (micronumpy) implemented nd attribute diff --git a/pypy/module/micronumpy/broadcast.py b/pypy/module/micronumpy/broadcast.py --- a/pypy/module/micronumpy/broadcast.py +++ b/pypy/module/micronumpy/broadcast.py @@ -81,6 +81,9 @@ def descr_get_numiter(self, space): return space.wrap(len(self.iters)) + def descr_get_number_of_dimensions(self, space): + return space.wrap(len(self.shape)) + @jit.unroll_safe def descr_next(self, space): if self.index >= self.size: @@ -107,4 +110,5 @@ size=GetSetProperty(W_Broadcast.descr_get_size), index=GetSetProperty(W_Broadcast.descr_get_index), numiter=GetSetProperty(W_Broadcast.descr_get_numiter), + 
nd=GetSetProperty(W_Broadcast.descr_get_number_of_dimensions), ) diff --git a/pypy/module/micronumpy/test/test_broadcast.py b/pypy/module/micronumpy/test/test_broadcast.py --- a/pypy/module/micronumpy/test/test_broadcast.py +++ b/pypy/module/micronumpy/test/test_broadcast.py @@ -57,7 +57,6 @@ def test_broadcast_failures(self): import numpy as np - import sys x = np.array([1, 2, 3]) y = np.array([4, 5]) raises(ValueError, np.broadcast, x, y) @@ -95,3 +94,11 @@ else: mit = np.broadcast(*arrs) assert mit.numiter == j + + def test_broadcast_nd(self): + import numpy as np + arg1, arg2 = np.empty((6, 7)), np.empty((5, 6, 1)) + b = np.broadcast(arg1, arg2) + + assert hasattr(b, 'nd') + assert b.nd == 3 From pypy.commits at gmail.com Sat Apr 30 16:13:30 2016 From: pypy.commits at gmail.com (Sergey Matyunin) Date: Sat, 30 Apr 2016 13:13:30 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast_nd: fixed compilation error for W_Broadcast Message-ID: <572511ea.c30a1c0a.18ac.46da@mx.google.com> Author: Sergey Matyunin Branch: numpy_broadcast_nd Changeset: r84067:23fae214f255 Date: 2016-04-24 14:28 +0200 http://bitbucket.org/pypy/pypy/changeset/23fae214f255/ Log: fixed compilation error for W_Broadcast diff --git a/pypy/module/micronumpy/broadcast.py b/pypy/module/micronumpy/broadcast.py --- a/pypy/module/micronumpy/broadcast.py +++ b/pypy/module/micronumpy/broadcast.py @@ -69,7 +69,7 @@ self.done = True raise OperationError(space.w_StopIteration, space.w_None) self.index += 1 - res = [it.descr_next(space) for it in self.list_iter_state] + res = [space.call_method(it, 'next') for it in self.list_iter_state] if len(res) < 2: return res[0] From pypy.commits at gmail.com Sat Apr 30 16:13:28 2016 From: pypy.commits at gmail.com (Sergey Matyunin) Date: Sat, 30 Apr 2016 13:13:28 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast_nd: Implemented reset for numpy broadcast object. 
Message-ID: <572511e8.0f801c0a.d39d8.46a8@mx.google.com> Author: Sergey Matyunin Branch: numpy_broadcast_nd Changeset: r84066:d52b849b3779 Date: 2016-04-24 13:54 +0200 http://bitbucket.org/pypy/pypy/changeset/d52b849b3779/ Log: Implemented reset for numpy broadcast object. diff --git a/pypy/module/micronumpy/broadcast.py b/pypy/module/micronumpy/broadcast.py --- a/pypy/module/micronumpy/broadcast.py +++ b/pypy/module/micronumpy/broadcast.py @@ -75,6 +75,11 @@ return res[0] return space.newtuple(res) + def descr_reset(self, space): + self.index = 0 + self.done = False + for it in self.list_iter_state: + it.reset() W_Broadcast.typedef = TypeDef("numpy.broadcast", __new__=interp2app(descr_new_broadcast), @@ -86,4 +91,5 @@ numiter=GetSetProperty(W_Broadcast.descr_get_numiter), nd=GetSetProperty(W_Broadcast.descr_get_number_of_dimensions), iters=GetSetProperty(W_Broadcast.descr_get_iters), + reset=interp2app(W_Broadcast.descr_reset), ) diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -76,7 +76,7 @@ base.get_order(), w_instance=base) return loop.flatiter_getitem(res, self.iter, state, step) finally: - self.iter.reset(self.state, mutate=True) + self.reset() def descr_setitem(self, space, w_idx, w_value): if not (space.isinstance_w(w_idx, space.w_int) or @@ -96,11 +96,14 @@ arr = convert_to_array(space, w_value) loop.flatiter_setitem(space, dtype, arr, self.iter, state, step, length) finally: - self.iter.reset(self.state, mutate=True) + self.reset() def descr___array_wrap__(self, space, obj, w_context=None): return obj + def reset(self): + self.iter.reset(self.state, mutate=True) + W_FlatIterator.typedef = TypeDef("numpy.flatiter", base = GetSetProperty(W_FlatIterator.descr_base), index = GetSetProperty(W_FlatIterator.descr_index), diff --git a/pypy/module/micronumpy/test/test_broadcast.py b/pypy/module/micronumpy/test/test_broadcast.py --- 
a/pypy/module/micronumpy/test/test_broadcast.py +++ b/pypy/module/micronumpy/test/test_broadcast.py @@ -123,3 +123,15 @@ assert step_in_y == y[0, 0] # == 3 assert step_in_broadcast == (1, 3) assert step2_in_y == y[1, 0] # == 4 + + def test_broadcast_reset(self): + import numpy as np + x = np.array([1, 2, 3]) + y = np.array([[4], [5], [6]]) + + b = np.broadcast(x, y) + b.next(), b.next(), b.next() + b.reset() + + assert b.index == 0 + assert b.next() == (1, 4) From pypy.commits at gmail.com Sat Apr 30 16:13:35 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 30 Apr 2016 13:13:35 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast_nd: add failing test - W_Broadcast.__init__ Message-ID: <572511ef.01341c0a.ce5fc.385b@mx.google.com> Author: Matti Picus Branch: numpy_broadcast_nd Changeset: r84069:e1729cba05e7 Date: 2016-04-30 22:42 +0300 http://bitbucket.org/pypy/pypy/changeset/e1729cba05e7/ Log: add failing test - W_Broadcast.__init__ diff --git a/pypy/module/micronumpy/test/test_broadcast.py b/pypy/module/micronumpy/test/test_broadcast.py --- a/pypy/module/micronumpy/test/test_broadcast.py +++ b/pypy/module/micronumpy/test/test_broadcast.py @@ -136,4 +136,19 @@ assert b.index == 0 assert b.next() == (1, 4) - + def test_broadcast_in_args(self): + # gh-5881 + import numpy as np + arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)), + np.empty((5, 1, 7))] + mits = [np.broadcast(*arrs), + np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])), + np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])] + print [mit.shape for mit in mits] + for mit in mits: + assert mit.shape == (5, 6, 7) + assert mit.nd == 3 + assert mit.numiter == 4 + for a, ia in zip(arrs, mit.iters): + assert a is ia.base + From pypy.commits at gmail.com Sat Apr 30 16:13:37 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 30 Apr 2016 13:13:37 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast_nd: try to treat W_Broadcast as true W_NumpyObjects Message-ID: 
<572511f1.42191c0a.97acd.4864@mx.google.com> Author: Matti Picus Branch: numpy_broadcast_nd Changeset: r84070:96c7090938b3 Date: 2016-04-30 23:12 +0300 http://bitbucket.org/pypy/pypy/changeset/96c7090938b3/ Log: try to treat W_Broadcast as true W_NumpyObjects diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -169,6 +169,6 @@ def convert_to_array(space, w_obj): from pypy.module.micronumpy.ctors import array - if isinstance(w_obj, W_NDimArray): + if isinstance(w_obj, W_NumpyObject) and not w_obj.is_scalar(): return w_obj return array(space, w_obj) diff --git a/pypy/module/micronumpy/broadcast.py b/pypy/module/micronumpy/broadcast.py --- a/pypy/module/micronumpy/broadcast.py +++ b/pypy/module/micronumpy/broadcast.py @@ -42,6 +42,21 @@ self.done = False + def get_shape(self): + return self.shape + + def get_order(self): + return self.order + + def get_dtype(self): + return self.seq[0].get_dtype() #XXX Fixme + + def get_size(self): + return 0 #XXX Fixme + + def create_iter(self, shape=None, backward_broadcast=False): + return self, self.list_iter_state # XXX Fixme + def descr_iter(self, space): return space.wrap(self) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,7 +1,7 @@ from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit from pypy.module.micronumpy import support, constants as NPY -from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.base import W_NDimArray, W_NumpyObject # structures to describe slicing @@ -218,7 +218,7 @@ def shape_agreement(space, shape1, w_arr2, broadcast_down=True): if w_arr2 is None: return shape1 - assert isinstance(w_arr2, W_NDimArray) + assert isinstance(w_arr2, W_NumpyObject) shape2 = w_arr2.get_shape() ret = _shape_agreement(shape1, shape2) if len(ret) < 
max(len(shape1), len(shape2)): From pypy.commits at gmail.com Sat Apr 30 16:30:04 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 30 Apr 2016 13:30:04 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-werror: merge default into branch Message-ID: <572515cc.4374c20a.52888.35ed@mx.google.com> Author: Matti Picus Branch: cpyext-werror Changeset: r84071:1846e020572e Date: 2016-04-30 23:25 +0300 http://bitbucket.org/pypy/pypy/changeset/1846e020572e/ Log: merge default into branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -24,7 +24,11 @@ remove-objspace-options. .. branch: cpyext-for-merge -Update cpyext C-API support: + +Update cpyext C-API support After this branch, we are almost able to support +upstream numpy via cpyext, so we created (yet another) fork of numpy at +github.com/pypy/numpy with the needed changes. Among the significant changes +to cpyext: - allow c-snippet tests to be run with -A so we can verify we are compatible - fix many edge cases exposed by fixing tests to run with -A - issequence() logic matches cpython @@ -40,6 +44,8 @@ - rewrite slot assignment for typeobjects - improve tracking of PyObject to rpython object mapping - support tp_as_{number, sequence, mapping, buffer} slots -After this branch, we are almost able to support upstream numpy via cpyext, so -we created (yet another) fork of numpy at github.com/pypy/numpy with the needed -changes + +.. branch: share-mapdict-methods-2 + +Reduce generated code for subclasses by using the same function objects in all +generated subclasses. 
diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -253,12 +253,8 @@ check_return, w_signature): funcs_w = [None] * ntypes dtypes_w = [None] * ntypes * (nin + nout) - # XXX For some reason funcs[i] segfaults, but this does not: - # cast(gufunctype, cast(CArrayPtr(CArrayPtr(gufunctype)), funcs)[i]) - # Something is very wrong here. - funcs_wrong_type = rffi.cast(rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), funcs) for i in range(ntypes): - funcs_w[i] = ufuncs.W_GenericUFuncCaller(rffi.cast(gufunctype, funcs_wrong_type[i]), data) + funcs_w[i] = ufuncs.W_GenericUFuncCaller(funcs[i], data) for i in range(ntypes*(nin+nout)): dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] w_funcs = space.newlist(funcs_w) diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -366,7 +366,7 @@ def test_ufunc(self): if self.runappdirect: from numpy import arange - py.test.xfail('why does this segfault on cpython?') + py.test.xfail('segfaults on cpython: PyUFunc_API == NULL?') else: from _numpypy.multiarray import arange mod = self.import_extension('foo', [ diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -231,17 +231,7 @@ assert max_n >= 0 ITEM = A.OF ctypes_item = get_ctypes_type(ITEM, delayed_builders) - # Python 2.5 ctypes can raise OverflowError on 64-bit builds - for n in [maxint, 2**31]: - MAX_SIZE = n/64 - try: - PtrType = ctypes.POINTER(MAX_SIZE * ctypes_item) - except (OverflowError, AttributeError), e: - pass # ^^^ bah, blame ctypes - else: - break - else: - raise e + ctypes_item_ptr = ctypes.POINTER(ctypes_item) class CArray(ctypes.Structure): 
if is_emulated_long: @@ -265,35 +255,9 @@ bigarray.length = n return bigarray - _ptrtype = None - - @classmethod - def _get_ptrtype(cls): - if cls._ptrtype: - return cls._ptrtype - # ctypes can raise OverflowError on 64-bit builds - # on windows it raises AttributeError even for 2**31 (_length_ missing) - if _MS_WINDOWS: - other_limit = 2**31-1 - else: - other_limit = 2**31 - for n in [maxint, other_limit]: - cls.MAX_SIZE = n / ctypes.sizeof(ctypes_item) - try: - cls._ptrtype = ctypes.POINTER(cls.MAX_SIZE * ctypes_item) - except (OverflowError, AttributeError), e: - pass - else: - break - else: - raise e - return cls._ptrtype - def _indexable(self, index): - PtrType = self._get_ptrtype() - assert index + 1 < self.MAX_SIZE - p = ctypes.cast(ctypes.pointer(self.items), PtrType) - return p.contents + p = ctypes.cast(self.items, ctypes_item_ptr) + return p def _getitem(self, index, boundscheck=True): if boundscheck: @@ -1045,12 +1009,22 @@ container = _array_of_known_length(T.TO) container._storage = type(cobj)(cobj.contents) elif isinstance(T.TO, lltype.FuncType): + # cobj is a CFunctionType object. We naively think + # that it should be a function pointer. No no no. If + # it was read out of an array, say, then it is a *pointer* + # to a function pointer. In other words, the read doesn't + # read anything, it just takes the address of the function + # pointer inside the array. If later the array is modified + # or goes out of scope, then we crash. CTypes is fun. + # It works if we cast it now to an int and back. 
cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value) if cobjkey in _int2obj: container = _int2obj[cobjkey] else: + name = getattr(cobj, '__name__', '?') + cobj = ctypes.cast(cobjkey, type(cobj)) _callable = get_ctypes_trampoline(T.TO, cobj) - return lltype.functionptr(T.TO, getattr(cobj, '__name__', '?'), + return lltype.functionptr(T.TO, name, _callable=_callable) elif isinstance(T.TO, lltype.OpaqueType): if T == llmemory.GCREF: diff --git a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py --- a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py @@ -1405,6 +1405,45 @@ a2 = ctypes2lltype(lltype.Ptr(A), lltype2ctypes(a)) assert a2._obj.getitem(0)._obj._parentstructure() is a2._obj + def test_array_of_function_pointers(self): + c_source = py.code.Source(r""" + #include "src/precommondefs.h" + #include + + typedef int(*funcptr_t)(void); + static int forty_two(void) { return 42; } + static int forty_three(void) { return 43; } + static funcptr_t testarray[2]; + RPY_EXPORTED void runtest(void cb(funcptr_t *)) { + testarray[0] = &forty_two; + testarray[1] = &forty_three; + fprintf(stderr, "&forty_two = %p\n", testarray[0]); + fprintf(stderr, "&forty_three = %p\n", testarray[1]); + cb(testarray); + testarray[0] = 0; + testarray[1] = 0; + } + """) + eci = ExternalCompilationInfo(include_dirs=[cdir], + separate_module_sources=[c_source]) + + PtrF = lltype.Ptr(lltype.FuncType([], rffi.INT)) + ArrayPtrF = rffi.CArrayPtr(PtrF) + CALLBACK = rffi.CCallback([ArrayPtrF], lltype.Void) + + runtest = rffi.llexternal('runtest', [CALLBACK], lltype.Void, + compilation_info=eci) + seen = [] + + def callback(testarray): + seen.append(testarray[0]) # read a PtrF out of testarray + seen.append(testarray[1]) + + runtest(callback) + assert seen[0]() == 42 + assert seen[1]() == 43 + + class TestPlatform(object): def test_lib_on_libpaths(self): from rpython.translator.platform 
import platform diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -544,6 +544,21 @@ ll_compress = compression_function(r_set) return llops.gendirectcall(ll_compress, v) +class __extend__(pairtype(FunctionReprBase, FunctionReprBase)): + def rtype_is_((robj1, robj2), hop): + if hop.s_result.is_constant(): + return inputconst(Bool, hop.s_result.const) + s_pbc = annmodel.unionof(robj1.s_pbc, robj2.s_pbc) + r_pbc = hop.rtyper.getrepr(s_pbc) + v1, v2 = hop.inputargs(r_pbc, r_pbc) + assert v1.concretetype == v2.concretetype + if v1.concretetype == Char: + return hop.genop('char_eq', [v1, v2], resulttype=Bool) + elif isinstance(v1.concretetype, Ptr): + return hop.genop('ptr_eq', [v1, v2], resulttype=Bool) + else: + raise TyperError("unknown type %r" % (v1.concretetype,)) + def conversion_table(r_from, r_to): if r_to in r_from._conversion_tables: diff --git a/rpython/rtyper/test/test_rpbc.py b/rpython/rtyper/test/test_rpbc.py --- a/rpython/rtyper/test/test_rpbc.py +++ b/rpython/rtyper/test/test_rpbc.py @@ -1497,6 +1497,47 @@ res = self.interpret(f, [2]) assert res == False + def test_is_among_functions_2(self): + def g1(): pass + def g2(): pass + def f(n): + if n > 5: + g = g2 + else: + g = g1 + g() + return g is g2 + res = self.interpret(f, [2]) + assert res == False + res = self.interpret(f, [8]) + assert res == True + + def test_is_among_functions_3(self): + def g0(): pass + def g1(): pass + def g2(): pass + def g3(): pass + def g4(): pass + def g5(): pass + def g6(): pass + def g7(): pass + glist = [g0, g1, g2, g3, g4, g5, g6, g7] + def f(n): + if n > 5: + g = g2 + else: + g = g1 + h = glist[n] + g() + h() + return g is h + res = self.interpret(f, [2]) + assert res == False + res = self.interpret(f, [1]) + assert res == True + res = self.interpret(f, [6]) + assert res == False + def test_shrink_pbc_set(self): def g1(): return 10 From pypy.commits at gmail.com Sat Apr 30 16:52:01 2016 From: 
pypy.commits at gmail.com (pjenvey) Date: Sat, 30 Apr 2016 13:52:01 -0700 (PDT) Subject: [pypy-commit] pypy py3k: minor cleanup Message-ID: <57251af1.50301c0a.6138.52c0@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r84072:f424e2be88dd Date: 2016-04-30 13:49 -0700 http://bitbucket.org/pypy/pypy/changeset/f424e2be88dd/ Log: minor cleanup diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -836,39 +836,32 @@ class W_TeeChainedListNode(W_Root): def __init__(self, space): - self.space = space self.w_next = None self.w_obj = None - def reduce_w(self): + def reduce_w(self, space): list_w = [] node = self - while node is not None: - if node.w_obj is not None: - list_w.append(node.w_obj) - node = node.w_next - else: - break - space = self.space - if list_w: - return self.space.newtuple([space.type(self), - space.newtuple([]), - space.newtuple([space.newlist(list_w)]) - ]) - else: - return self.space.newtuple([space.type(self), - space.newtuple([])]) + while node is not None and node.w_obj is not None: + list_w.append(node.w_obj) + node = node.w_next + if not list_w: + return space.newtuple([space.type(self), space.newtuple([])]) + return space.newtuple( + [space.type(self), + space.newtuple([]), + space.newtuple([space.newlist(list_w)]) + ]) def descr_setstate(self, space, w_state): state = space.unpackiterable(w_state) if len(state) != 1: - raise OperationError(space.w_ValueError, - space.wrap("invalid arguments")) + raise oefmt(space.w_ValueError, "invalid arguments") obj_list_w = space.unpackiterable(state[0]) node = self for w_obj in obj_list_w: node.w_obj = w_obj - node.w_next = W_TeeChainedListNode(self.space) + node.w_next = W_TeeChainedListNode(space) node = node.w_next def W_TeeChainedListNode___new__(space, w_subtype): @@ -883,7 +876,6 @@ __reduce__ = interp2app(W_TeeChainedListNode.reduce_w), __setstate__ 
= interp2app(W_TeeChainedListNode.descr_setstate) ) - W_TeeChainedListNode.typedef.acceptable_as_base_class = False class W_TeeIterable(W_Root): From pypy.commits at gmail.com Sat Apr 30 16:52:03 2016 From: pypy.commits at gmail.com (pjenvey) Date: Sat, 30 Apr 2016 13:52:03 -0700 (PDT) Subject: [pypy-commit] pypy py3k: minor cleanup Message-ID: <57251af3.2b30c20a.a01e5.3b31@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r84073:ee02e1bd2dd3 Date: 2016-04-30 13:50 -0700 http://bitbucket.org/pypy/pypy/changeset/ee02e1bd2dd3/ Log: minor cleanup diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -167,11 +167,7 @@ def path_or_fd(allow_fd=True): return _PathOrFd if allow_fd else _JustPath - -if hasattr(rposix, 'AT_FDCWD'): - DEFAULT_DIR_FD = rposix.AT_FDCWD -else: - DEFAULT_DIR_FD = -100 +DEFAULT_DIR_FD = getattr(rposix, 'AT_FDCWD', -100) DIR_FD_AVAILABLE = False @specialize.arg(2) @@ -204,10 +200,8 @@ dir_fd = unwrap_fd(space, w_value) if dir_fd == DEFAULT_DIR_FD: return dir_fd - else: - raise oefmt( - space.w_NotImplementedError, - "dir_fd unavailable on this platform") + raise oefmt(space.w_NotImplementedError, + "dir_fd unavailable on this platform") def DirFD(available=False): return _DirFD if available else _DirFD_Unavailable @@ -559,7 +553,8 @@ raise wrap_oserror(space, e) @unwrap_spec(mode=c_int, - dir_fd=DirFD(rposix.HAVE_FACCESSAT), effective_ids=kwonly(bool), follow_symlinks=kwonly(bool)) + dir_fd=DirFD(rposix.HAVE_FACCESSAT), effective_ids=kwonly(bool), + follow_symlinks=kwonly(bool)) def access(space, w_path, mode, dir_fd=DEFAULT_DIR_FD, effective_ids=True, follow_symlinks=True): """\ @@ -903,7 +898,8 @@ raise wrap_oserror(space, e) return space.newtuple([space.wrap(fd1), space.wrap(fd2)]) - at unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_FCHMODAT), follow_symlinks=kwonly(bool)) + at unwrap_spec(mode=c_int, 
dir_fd=DirFD(rposix.HAVE_FCHMODAT), + follow_symlinks=kwonly(bool)) def chmod(space, w_path, mode, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """chmod(path, mode, *, dir_fd=None, follow_symlinks=True) @@ -924,12 +920,11 @@ if not rposix.HAVE_FCHMODAT: if not follow_symlinks: raise argument_unavailable(space, "chmod", "follow_symlinks") - else: - try: - dispatch_filename(rposix.chmod)(space, w_path, mode) - return - except OSError as e: - raise wrap_oserror2(space, e, w_path) + try: + dispatch_filename(rposix.chmod)(space, w_path, mode) + return + except OSError as e: + raise wrap_oserror2(space, e, w_path) try: path = space.fsencode_w(w_path) @@ -947,8 +942,7 @@ # fchmodat() doesn't actually implement follow_symlinks=False # so raise NotImplementedError in this case raise argument_unavailable(space, "chmod", "follow_symlinks") - else: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path) def _chmod_path(path, mode, dir_fd, follow_symlinks): if dir_fd != DEFAULT_DIR_FD or not follow_symlinks: @@ -1359,7 +1353,8 @@ path=path_or_fd(allow_fd=rposix.HAVE_FUTIMENS), w_times=WrappedDefault(None), w_ns=kwonly(WrappedDefault(None)), dir_fd=DirFD(rposix.HAVE_UTIMENSAT), follow_symlinks=kwonly(bool)) -def utime(space, path, w_times, w_ns, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): +def utime(space, path, w_times, w_ns, dir_fd=DEFAULT_DIR_FD, + follow_symlinks=True): """utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True) Set the access and modified time of path. 
From pypy.commits at gmail.com Sat Apr 30 16:13:33 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 30 Apr 2016 13:13:33 -0700 (PDT) Subject: [pypy-commit] pypy numpy_broadcast_nd: merge default into branch Message-ID: <572511ed.2450c20a.813c0.28c0@mx.google.com> Author: Matti Picus Branch: numpy_broadcast_nd Changeset: r84068:4d1e324b4a8e Date: 2016-04-30 22:40 +0300 http://bitbucket.org/pypy/pypy/changeset/4d1e324b4a8e/ Log: merge default into branch diff too long, truncating to 2000 out of 12991 lines diff --git a/TODO b/TODO new file mode 100644 --- /dev/null +++ b/TODO @@ -0,0 +1,2 @@ +* reduce size of generated c code from slot definitions in slotdefs. +* remove broken DEBUG_REFCOUNT from pyobject.py diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py --- a/lib-python/2.7/distutils/cmd.py +++ b/lib-python/2.7/distutils/cmd.py @@ -298,8 +298,16 @@ src_cmd_obj.ensure_finalized() for (src_option, dst_option) in option_pairs: if getattr(self, dst_option) is None: - setattr(self, dst_option, - getattr(src_cmd_obj, src_option)) + try: + setattr(self, dst_option, + getattr(src_cmd_obj, src_option)) + except AttributeError: + # This was added after problems with setuptools 18.4. + # It seems that setuptools 20.9 fixes the problem. + # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv + # if I say "virtualenv -p pypy venv-pypy" then it + # just installs setuptools 18.4 from some cache... + pass def get_finalized_command(self, command, create=1): diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt --- a/lib-python/stdlib-upgrade.txt +++ b/lib-python/stdlib-upgrade.txt @@ -5,15 +5,23 @@ overly detailed -1. check out the branch vendor/stdlib +0. make sure your working dir is clean +1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k) + or create branch vendor/stdlib-3-* 2. upgrade the files there + 2a. remove lib-python/2.7/ or lib-python/3/ + 2b. 
copy the files from the cpython repo + 2c. hg add lib-python/2.7/ or lib-python/3/ + 2d. hg remove --after + 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'` + 2f. fix copies / renames manually by running `hg copy --after ` for each copied file 3. update stdlib-version.txt with the output of hg -id from the cpython repo 4. commit -5. update to default/py3k +5. update to default / py3k 6. create a integration branch for the new stdlib (just hg branch stdlib-$version) -7. merge vendor/stdlib +7. merge vendor/stdlib or vendor/stdlib-3-* 8. commit 10. fix issues 11. commit --close-branch -12. merge to default +12. merge to default / py3k diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -204,15 +204,6 @@ BoolOption("withstrbuf", "use strings optimized for addition (ver 2)", default=False), - BoolOption("withprebuiltchar", - "use prebuilt single-character string objects", - default=False), - - BoolOption("sharesmallstr", - "always reuse the prebuilt string objects " - "(the empty string and potentially single-char strings)", - default=False), - BoolOption("withspecialisedtuple", "use specialised tuples", default=False), @@ -222,39 +213,14 @@ default=False, requires=[("objspace.honor__builtins__", False)]), - BoolOption("withmapdict", - "make instances really small but slow without the JIT", - default=False, - requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withtypeversion", True), - ]), - - BoolOption("withrangelist", - "enable special range list implementation that does not " - "actually create the full list until the resulting " - "list is mutated", - default=False), BoolOption("withliststrategies", "enable optimized ways to store lists of primitives ", default=True), - BoolOption("withtypeversion", - "version type objects when changing them", - cmdline=None, - default=False, - # weakrefs needed, because 
of get_subclasses() - requires=[("translation.rweakref", True)]), - - BoolOption("withmethodcache", - "try to cache method lookups", - default=False, - requires=[("objspace.std.withtypeversion", True), - ("translation.rweakref", True)]), BoolOption("withmethodcachecounter", "try to cache methods and provide a counter in __pypy__. " "for testing purposes only.", - default=False, - requires=[("objspace.std.withmethodcache", True)]), + default=False), IntOption("methodcachesizeexp", " 2 ** methodcachesizeexp is the size of the of the method cache ", default=11), @@ -265,22 +231,10 @@ BoolOption("optimized_list_getitem", "special case the 'list[integer]' expressions", default=False), - BoolOption("getattributeshortcut", - "track types that override __getattribute__", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), BoolOption("newshortcut", "cache and shortcut calling __new__ from builtin types", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), + default=False), - BoolOption("withidentitydict", - "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not", - default=False, - # weakrefs needed, because of get_subclasses() - requires=[("translation.rweakref", True)]), ]), ]) @@ -296,15 +250,10 @@ """ # all the good optimizations for PyPy should be listed here if level in ['2', '3', 'jit']: - config.objspace.std.suggest(withrangelist=True) - config.objspace.std.suggest(withmethodcache=True) - config.objspace.std.suggest(withprebuiltchar=True) config.objspace.std.suggest(intshortcut=True) config.objspace.std.suggest(optimized_list_getitem=True) - config.objspace.std.suggest(getattributeshortcut=True) #config.objspace.std.suggest(newshortcut=True) config.objspace.std.suggest(withspecialisedtuple=True) - config.objspace.std.suggest(withidentitydict=True) #if not IS_64_BITS: # 
config.objspace.std.suggest(withsmalllong=True) @@ -317,16 +266,13 @@ # memory-saving optimizations if level == 'mem': config.objspace.std.suggest(withprebuiltint=True) - config.objspace.std.suggest(withrangelist=True) - config.objspace.std.suggest(withprebuiltchar=True) - config.objspace.std.suggest(withmapdict=True) + config.objspace.std.suggest(withliststrategies=True) if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) - config.objspace.std.suggest(withmapdict=True) def enable_allworkingmodules(config): diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -11,12 +11,6 @@ assert conf.objspace.usemodules.gc - conf.objspace.std.withmapdict = True - assert conf.objspace.std.withtypeversion - conf = get_pypy_config() - conf.objspace.std.withtypeversion = False - py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") - def test_conflicting_gcrootfinder(): conf = get_pypy_config() conf.translation.gc = "boehm" @@ -47,18 +41,10 @@ def test_set_pypy_opt_level(): conf = get_pypy_config() set_pypy_opt_level(conf, '2') - assert conf.objspace.std.getattributeshortcut + assert conf.objspace.std.intshortcut conf = get_pypy_config() set_pypy_opt_level(conf, '0') - assert not conf.objspace.std.getattributeshortcut - -def test_rweakref_required(): - conf = get_pypy_config() - conf.translation.rweakref = False - set_pypy_opt_level(conf, '3') - - assert not conf.objspace.std.withtypeversion - assert not conf.objspace.std.withmethodcache + assert not conf.objspace.std.intshortcut def test_check_documentation(): def check_file_exists(fn): diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -108,9 +108,9 @@ On Fedora:: - yum install gcc make libffi-devel pkgconfig zlib-devel 
bzip2-devel \ - lib-sqlite3-devel ncurses-devel expat-devel openssl-devel - (XXX plus the Febora version of libgdbm-dev and tk-dev) + dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ + lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \ + gdbm-devel For the optional lzma module on PyPy3 you will also need ``xz-devel``. diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.getattributeshortcut.txt +++ /dev/null @@ -1,1 +0,0 @@ -Performance only: track types that override __getattribute__. diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt --- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt +++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt @@ -1,1 +1,1 @@ -Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`. +Set the cache size (number of entries) for the method cache. diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withidentitydict.txt +++ /dev/null @@ -1,21 +0,0 @@ -============================= -objspace.std.withidentitydict -============================= - -* **name:** withidentitydict - -* **description:** enable a dictionary strategy for "by identity" comparisons - -* **command-line:** --objspace-std-withidentitydict - -* **command-line for negation:** --no-objspace-std-withidentitydict - -* **option type:** boolean option - -* **default:** True - - -Enable a dictionary strategy specialized for instances of classes which -compares "by identity", which is the default unless you override ``__hash__``, -``__eq__`` or ``__cmp__``. This strategy will be used only with new-style -classes. 
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmapdict.txt +++ /dev/null @@ -1,5 +0,0 @@ -Enable the new version of "sharing dictionaries". - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withmethodcache.txt +++ /dev/null @@ -1,2 +0,0 @@ -Enable method caching. See the section "Method Caching" in `Standard -Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__. diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt --- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt +++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt @@ -1,1 +1,1 @@ -Testing/debug option for :config:`objspace.std.withmethodcache`. +Testing/debug option for the method cache. diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt deleted file mode 100644 diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withrangelist.txt +++ /dev/null @@ -1,11 +0,0 @@ -Enable "range list" objects. They are an additional implementation of the Python -``list`` type, indistinguishable for the normal user. Whenever the ``range`` -builtin is called, an range list is returned. As long as this list is not -mutated (and for example only iterated over), it uses only enough memory to -store the start, stop and step of the range. 
This makes using ``range`` as -efficient as ``xrange``, as long as the result is only used in a ``for``-loop. - -See the section in `Standard Interpreter Optimizations`_ for more details. - -.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists - diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.withtypeversion.txt +++ /dev/null @@ -1,6 +0,0 @@ -This (mostly internal) option enables "type versions": Every type object gets an -(only internally visible) version that is updated when the type's dict is -changed. This is e.g. used for invalidating caches. It does not make sense to -enable this option alone. - -.. internal diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -12,9 +12,9 @@ The work on the cling backend has so far been done only for CPython, but bringing it to PyPy is a lot less work than developing it in the first place. -.. _Reflex: http://root.cern.ch/drupal/content/reflex -.. _CINT: http://root.cern.ch/drupal/content/cint -.. _cling: http://root.cern.ch/drupal/content/cling +.. _Reflex: https://root.cern.ch/how/how-use-reflex +.. _CINT: https://root.cern.ch/introduction-cint +.. _cling: https://root.cern.ch/cling .. _llvm: http://llvm.org/ .. _clang: http://clang.llvm.org/ @@ -283,7 +283,8 @@ core reflection set, but for the moment assume we want to have it in the reflection library that we are building for this example. -The ``genreflex`` script can be steered using a so-called `selection file`_, +The ``genreflex`` script can be steered using a so-called `selection file`_ +(see "Generating Reflex Dictionaries") which is a simple XML file specifying, either explicitly or by using a pattern, which classes, variables, namespaces, etc. to select from the given header file. @@ -305,7 +306,7 @@ -.. 
_selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries +.. _selection file: https://root.cern.ch/how/how-use-reflex Now the reflection info can be generated and compiled:: @@ -811,7 +812,7 @@ immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment variable. -.. _PyROOT: http://root.cern.ch/drupal/content/pyroot +.. _PyROOT: https://root.cern.ch/pyroot There are a couple of minor differences between PyCintex and cppyy, most to do with naming. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -387,6 +387,14 @@ wrappers. On PyPy we can't tell the difference, so ``ismethod([].__add__) == ismethod(list.__add__) == True``. +* in CPython, the built-in types have attributes that can be + implemented in various ways. Depending on the way, if you try to + write to (or delete) a read-only (or undeletable) attribute, you get + either a ``TypeError`` or an ``AttributeError``. PyPy tries to + strike some middle ground between full consistency and full + compatibility here. This means that a few corner cases don't raise + the same exception, like ``del (lambda:None).__closure__``. + * in pure Python, if you write ``class A(object): def f(self): pass`` and have a subclass ``B`` which doesn't override ``f()``, then ``B.f(x)`` still checks that ``x`` is an instance of ``B``. 
In diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -21,7 +21,7 @@ :source:`pypy/doc/discussion/` drafts of ideas and documentation -:source:`pypy/goal/` our :ref:`main PyPy-translation scripts ` +:source:`pypy/goal/` our main PyPy-translation scripts live here :source:`pypy/interpreter/` :doc:`bytecode interpreter ` and related objects diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -13,3 +13,4 @@ discussion/improve-rpython discussion/ctypes-implementation discussion/jit-profiler + discussion/rawrefcount diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -79,7 +79,7 @@ :doc:`Full details ` are `available here `. .. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 -.. _Reflex: http://root.cern.ch/drupal/content/reflex +.. _Reflex: https://root.cern.ch/how/how-use-reflex RPython Mixed Modules diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -106,20 +106,33 @@ For information on which third party extensions work (or do not work) with PyPy see the `compatibility wiki`_. +For more information about how we manage refcounting semamtics see +rawrefcount_ + .. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home .. _cffi: http://cffi.readthedocs.org/ +.. _rawrefcount: discussion/rawrefcount.html On which platforms does PyPy run? --------------------------------- -PyPy is regularly and extensively tested on Linux machines. 
It mostly +PyPy currently supports: + + * **x86** machines on most common operating systems + (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s390x** running Linux + +PyPy is regularly and extensively tested on Linux machines. It works on Mac and Windows: it is tested there, but most of us are running -Linux so fixes may depend on 3rd-party contributions. PyPy's JIT -works on x86 (32-bit or 64-bit) and on ARM (ARMv6 or ARMv7). -Support for POWER (64-bit) is stalled at the moment. +Linux so fixes may depend on 3rd-party contributions. -To bootstrap from sources, PyPy can use either CPython (2.6 or 2.7) or +To bootstrap from sources, PyPy can use either CPython 2.7 or another (e.g. older) PyPy. Cross-translation is not really supported: e.g. to build a 32-bit PyPy, you need to have a 32-bit environment. Cross-translation is only explicitly supported between a 32-bit Intel diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst --- a/pypy/doc/interpreter-optimizations.rst +++ b/pypy/doc/interpreter-optimizations.rst @@ -62,29 +62,37 @@ Dictionary Optimizations ~~~~~~~~~~~~~~~~~~~~~~~~ -Multi-Dicts -+++++++++++ +Dict Strategies +++++++++++++++++ -Multi-dicts are a special implementation of dictionaries. It became clear that -it is very useful to *change* the internal representation of an object during -its lifetime. Multi-dicts are a general way to do that for dictionaries: they -provide generic support for the switching of internal representations for -dicts. +Dict strategies are an implementation approach for dictionaries (and lists) +that make it possible to use a specialized representation of the dictionary's +data, while still being able to switch back to a general representation should +that become necessary later. 
-If you just enable multi-dicts, special representations for empty dictionaries, -for string-keyed dictionaries. In addition there are more specialized dictionary -implementations for various purposes (see below). +Dict strategies are always enabled, by default there are special strategies for +dicts with just string keys, just unicode keys and just integer keys. If one of +those specialized strategies is used, then dict lookup can use much faster +hashing and comparison for the dict keys. There is of course also a strategy +for general keys. -This is now the default implementation of dictionaries in the Python interpreter. +Identity Dicts ++++++++++++++++ -Sharing Dicts +We also have a strategy specialized for keys that are instances of classes +which compares "by identity", which is the default unless you override +``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with +new-style classes. + + +Map Dicts +++++++++++++ -Sharing dictionaries are a special representation used together with multidicts. -This dict representation is used only for instance dictionaries and tries to -make instance dictionaries use less memory (in fact, in the ideal case the -memory behaviour should be mostly like that of using __slots__). +Map dictionaries are a special representation used together with dict strategies. +This dict strategy is used only for instance dictionaries and tries to +make instance dictionaries use less memory (in fact, usually memory behaviour +should be mostly like that of using ``__slots__``). The idea is the following: Most instances of the same class have very similar attributes, and are even adding these keys to the dictionary in the same order @@ -95,8 +103,6 @@ dicts: the representation of the instance dict contains only a list of values. -A more advanced version of sharing dicts, called *map dicts,* is available -with the :config:`objspace.std.withmapdict` option. List Optimizations @@ -114,8 +120,8 @@ created. 
This gives the memory and speed behaviour of ``xrange`` and the generality of use of ``range``, and makes ``xrange`` essentially useless. -You can enable this feature with the :config:`objspace.std.withrangelist` -option. +This feature is enabled by default as part of the +:config:`objspace.std.withliststrategies` option. User Class Optimizations @@ -133,8 +139,7 @@ base classes is changed). On subsequent lookups the cached version can be used, as long as the instance did not shadow any of its classes attributes. -You can enable this feature with the :config:`objspace.std.withmethodcache` -option. +This feature is enabled by default. Interpreter Optimizations diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -6,3 +6,46 @@ .. startrev: aa60332382a1 .. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046 + +.. branch: gcheader-decl + +Reduce the size of generated C sources. + + +.. branch: remove-objspace-options + +Remove a number of options from the build process that were never tested and +never set. Fix a performance bug in the method cache. + +.. branch: bitstring + +JIT: use bitstrings to compress the lists of read or written descrs +that we attach to EffectInfo. Fixes a problem we had in +remove-objspace-options. + +.. branch: cpyext-for-merge + +Update cpyext C-API support After this branch, we are almost able to support +upstream numpy via cpyext, so we created (yet another) fork of numpy at +github.com/pypy/numpy with the needed changes. 
Among the significant changes +to cpyext: + - allow c-snippet tests to be run with -A so we can verify we are compatible + - fix many edge cases exposed by fixing tests to run with -A + - issequence() logic matches cpython + - make PyStringObject and PyUnicodeObject field names compatible with cpython + - add prelminary support for PyDateTime_* + - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy, + PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile, + - PyAnySet_CheckExact, PyUnicode_Concat + - improve support for PyGILState_Ensure, PyGILState_Release, and thread + primitives, also find a case where CPython will allow thread creation + before PyEval_InitThreads is run, dissallow on PyPy + - create a PyObject-specific list strategy + - rewrite slot assignment for typeobjects + - improve tracking of PyObject to rpython object mapping + - support tp_as_{number, sequence, mapping, buffer} slots + +.. branch: share-mapdict-methods-2 + +Reduce generated code for subclasses by using the same function objects in all +generated subclasses. 
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1176,7 +1176,27 @@ return self.w_False def issequence_w(self, w_obj): - return (self.findattr(w_obj, self.wrap("__getitem__")) is not None) + if self.is_oldstyle_instance(w_obj): + return (self.findattr(w_obj, self.wrap('__getitem__')) is not None) + flag = self.type(w_obj).flag_map_or_seq + if flag == 'M': + return False + elif flag == 'S': + return True + else: + return (self.lookup(w_obj, '__getitem__') is not None) + + def ismapping_w(self, w_obj): + if self.is_oldstyle_instance(w_obj): + return (self.findattr(w_obj, self.wrap('__getitem__')) is not None) + flag = self.type(w_obj).flag_map_or_seq + if flag == 'M': + return True + elif flag == 'S': + return False + else: + return (self.lookup(w_obj, '__getitem__') is not None and + self.lookup(w_obj, '__getslice__') is None) # The code below only works # for the simple case (new-style instance). diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -214,6 +214,7 @@ self._trace(frame, 'exception', None, operationerr) #operationerr.print_detailed_traceback(self.space) + @jit.dont_look_inside @specialize.arg(1) def sys_exc_info(self, for_hidden=False): """Implements sys.exc_info(). @@ -225,15 +226,7 @@ # NOTE: the result is not the wrapped sys.exc_info() !!! 
""" - frame = self.gettopframe() - while frame: - if frame.last_exception is not None: - if ((for_hidden or not frame.hide()) or - frame.last_exception is - get_cleared_operation_error(self.space)): - return frame.last_exception - frame = frame.f_backref() - return None + return self.gettopframe()._exc_info_unroll(self.space, for_hidden) def set_sys_exc_info(self, operror): frame = self.gettopframe_nohidden() diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -114,6 +114,7 @@ e.write_unraisable(self.space, "new_code_hook()") def _initialize(self): + from pypy.objspace.std.mapdict import init_mapdict_cache if self.co_cellvars: argcount = self.co_argcount assert argcount >= 0 # annotator hint @@ -149,9 +150,7 @@ self._compute_flatcall() - if self.space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import init_mapdict_cache - init_mapdict_cache(self) + init_mapdict_cache(self) def _init_ready(self): "This is a hook for the vmprof module, which overrides this method." @@ -163,7 +162,10 @@ # When translating PyPy, freeze the file name # /lastdirname/basename.py # instead of freezing the complete translation-time path. 
- filename = self.co_filename.lstrip('<').rstrip('>') + filename = self.co_filename + if filename.startswith(''): + return + filename = filename.lstrip('<').rstrip('>') if filename.lower().endswith('.pyc'): filename = filename[:-1] basename = os.path.basename(filename) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -4,7 +4,7 @@ from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized, check_nonneg from rpython.rlib.jit import hint -from rpython.rlib.objectmodel import we_are_translated, instantiate +from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint from rpython.tool.pairtype import extendabletype @@ -12,7 +12,8 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.astcompiler import consts from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import ( + OperationError, get_cleared_operation_error, oefmt) from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.nestedscope import Cell from pypy.tool import stdlib_opcode @@ -870,6 +871,22 @@ return space.wrap(self.builtin is not space.builtin) return space.w_False + @jit.unroll_safe + @specialize.arg(2) + def _exc_info_unroll(self, space, for_hidden=False): + """Return the most recent OperationError being handled in the + call stack + """ + frame = self + while frame: + last = frame.last_exception + if last is not None: + if last is get_cleared_operation_error(self.space): + break + if for_hidden or not frame.hide(): + return last + frame = frame.f_backref() + return None # ____________________________________________________________ diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -739,25 
+739,16 @@ unroller = SContinueLoop(startofloop) return self.unrollstack_and_jump(unroller) - @jit.unroll_safe def RAISE_VARARGS(self, nbargs, next_instr): space = self.space if nbargs == 0: - frame = self - while frame: - if frame.last_exception is not None: - operror = frame.last_exception - break - frame = frame.f_backref() - else: - raise OperationError(space.w_TypeError, - space.wrap("raise: no active exception to re-raise")) - if operror.w_type is space.w_None: - raise OperationError(space.w_TypeError, - space.wrap("raise: the exception to re-raise was cleared")) + last_operr = self._exc_info_unroll(space, for_hidden=True) + if last_operr is None: + raise oefmt(space.w_TypeError, + "No active exception to reraise") # re-raise, no new traceback obj will be attached - self.last_exception = operror - raise RaiseWithExplicitTraceback(operror) + self.last_exception = last_operr + raise RaiseWithExplicitTraceback(last_operr) w_value = w_traceback = space.w_None if nbargs >= 3: @@ -951,8 +942,7 @@ def LOAD_ATTR(self, nameindex, next_instr): "obj.attributename" w_obj = self.popvalue() - if (self.space.config.objspace.std.withmapdict - and not jit.we_are_jitted()): + if not jit.we_are_jitted(): from pypy.objspace.std.mapdict import LOAD_ATTR_caching w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex) else: diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -362,6 +362,45 @@ """) assert seen == [1] + def test_mapdict_number_of_slots(self): + space = self.space + a, b, c = space.unpackiterable(space.appexec([], """(): + class A(object): + pass + a = A() + a.x = 1 + class B: + pass + b = B() + b.x = 1 + class C(int): + pass + c = C(1) + c.x = 1 + return a, b, c + """), 3) + assert not hasattr(a, "storage") + assert not hasattr(b, "storage") + assert hasattr(c, "storage") + + def test_del(self): + space = self.space + a, b, c, d = 
space.unpackiterable(space.appexec([], """(): + class A(object): + pass + class B(object): + def __del__(self): + pass + class F(file): + pass + class G(file): + def __del__(self): + pass + return A(), B(), F("xyz", "w"), G("ghi", "w") + """)) + assert type(b).__base__ is type(a) + assert hasattr(c, "__del__") + assert type(d) is type(c) class AppTestTypeDef: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -98,240 +98,100 @@ # reason is that it is missing a place to store the __dict__, the slots, # the weakref lifeline, and it typically has no interp-level __del__. # So we create a few interp-level subclasses of W_XxxObject, which add -# some combination of features. -# -# We don't build 2**4 == 16 subclasses for all combinations of requested -# features, but limit ourselves to 6, chosen a bit arbitrarily based on -# typical usage (case 1 is the most common kind of app-level subclasses; -# case 2 is the memory-saving kind defined with __slots__). -# -# +----------------------------------------------------------------+ -# | NOTE: if withmapdict is enabled, the following doesn't apply! | -# | Map dicts can flexibly allow any slots/__dict__/__weakref__ to | -# | show up only when needed. In particular there is no way with | -# | mapdict to prevent some objects from being weakrefable. | -# +----------------------------------------------------------------+ -# -# dict slots del weakrefable -# -# 1. Y N N Y UserDictWeakref -# 2. N Y N N UserSlots -# 3. Y Y N Y UserDictWeakrefSlots -# 4. N Y N Y UserSlotsWeakref -# 5. Y Y Y Y UserDictWeakrefSlotsDel -# 6. N Y Y Y UserSlotsWeakrefDel -# -# Note that if the app-level explicitly requests no dict, we should not -# provide one, otherwise storing random attributes on the app-level -# instance would unexpectedly work. We don't care too much, though, if -# an object is weakrefable when it shouldn't really be. 
It's important -# that it has a __del__ only if absolutely needed, as this kills the -# performance of the GCs. -# -# Interp-level inheritance is like this: -# -# W_XxxObject base -# / \ -# 1 2 -# / \ -# 3 4 -# / \ -# 5 6 +# some combination of features. This is done using mapdict. -def get_unique_interplevel_subclass(config, cls, hasdict, wants_slots, - needsdel=False, weakrefable=False): +# we need two subclasses of the app-level type, one to add mapdict, and then one +# to add del to not slow down the GC. + +def get_unique_interplevel_subclass(space, cls, needsdel=False): "NOT_RPYTHON: initialization-time only" if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False): needsdel = False assert cls.typedef.acceptable_as_base_class - key = config, cls, hasdict, wants_slots, needsdel, weakrefable + key = space, cls, needsdel try: return _subclass_cache[key] except KeyError: - subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel, - weakrefable) + # XXX can save a class if cls already has a __del__ + keys = [key] + base_has_del = hasattr(cls, '__del__') + if base_has_del: + # if the base has a __del__, we only need one class + keys = [(space, cls, True), (space, cls, False)] + needsdel = True + elif needsdel: + cls = get_unique_interplevel_subclass(space, cls, False) + subcls = _getusercls(space, cls, needsdel) assert key not in _subclass_cache - _subclass_cache[key] = subcls + for key in keys: + _subclass_cache[key] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable): +def _getusercls(space, cls, wants_del, reallywantdict=False): + from rpython.rlib import objectmodel + from pypy.objspace.std.objectobject import W_ObjectObject + from pypy.module.__builtin__.interp_classobj import W_InstanceObject + from pypy.objspace.std.mapdict import (BaseUserClassMapdict, + MapdictDictSupport, MapdictWeakrefSupport, 
+ _make_storage_mixin_size_n, MapdictStorageMixin) typedef = cls.typedef - if wants_dict and typedef.hasdict: - wants_dict = False - if config.objspace.std.withmapdict and not typedef.hasdict: - # mapdict only works if the type does not already have a dict - if wants_del: - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - return _usersubclswithfeature(config, cls, "user", "dict", "weakref", "slots") - # Forest of if's - see the comment above. + name = cls.__name__ + "User" + + mixins_needed = [] + copy_methods = [] + mixins_needed = [] + name = cls.__name__ + if not cls.user_overridden_class: + if cls is W_ObjectObject or cls is W_InstanceObject: + mixins_needed.append(_make_storage_mixin_size_n()) + else: + mixins_needed.append(MapdictStorageMixin) + copy_methods = [BaseUserClassMapdict] + if reallywantdict or not typedef.hasdict: + # the type has no dict, mapdict to provide the dict + copy_methods.append(MapdictDictSupport) + name += "Dict" + if not typedef.weakrefable: + # the type does not support weakrefs yet, mapdict to provide weakref + # support + copy_methods.append(MapdictWeakrefSupport) + name += "Weakrefable" if wants_del: - if wants_dict: - # case 5. Parent class is 3. - parentcls = get_unique_interplevel_subclass(config, cls, True, True, - False, True) - else: - # case 6. Parent class is 4. - parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, True) - return _usersubclswithfeature(config, parentcls, "del") - elif wants_dict: - if wants_slots: - # case 3. Parent class is 1. 
- parentcls = get_unique_interplevel_subclass(config, cls, True, False, - False, True) - return _usersubclswithfeature(config, parentcls, "slots") - else: - # case 1 (we need to add weakrefable unless it's already in 'cls') - if not typedef.weakrefable: - return _usersubclswithfeature(config, cls, "user", "dict", "weakref") - else: - return _usersubclswithfeature(config, cls, "user", "dict") - else: - if weakrefable and not typedef.weakrefable: - # case 4. Parent class is 2. - parentcls = get_unique_interplevel_subclass(config, cls, False, True, - False, False) - return _usersubclswithfeature(config, parentcls, "weakref") - else: - # case 2 (if the base is already weakrefable, case 2 == case 4) - return _usersubclswithfeature(config, cls, "user", "slots") - -def _usersubclswithfeature(config, parentcls, *features): - key = config, parentcls, features - try: - return _usersubclswithfeature_cache[key] - except KeyError: - subcls = _builduserclswithfeature(config, parentcls, *features) - _usersubclswithfeature_cache[key] = subcls - return subcls -_usersubclswithfeature_cache = {} -_allusersubcls_cache = {} - -def _builduserclswithfeature(config, supercls, *features): - "NOT_RPYTHON: initialization-time only" - name = supercls.__name__ - name += ''.join([name.capitalize() for name in features]) - body = {} - #print '..........', name, '(', supercls.__name__, ')' - - def add(Proto): - for key, value in Proto.__dict__.items(): - if (not key.startswith('__') and not key.startswith('_mixin_') - or key == '__del__'): - if hasattr(value, "func_name"): - value = func_with_new_name(value, value.func_name) - body[key] = value - - if (config.objspace.std.withmapdict and "dict" in features): - from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin - add(BaseMapdictObject) - add(ObjectMixin) - body["user_overridden_class"] = True - features = () - - if "user" in features: # generic feature needed by all subcls - - class Proto(object): - user_overridden_class = True - 
- def getclass(self, space): - return promote(self.w__class__) - - def setclass(self, space, w_subtype): - # only used by descr_set___class__ - self.w__class__ = w_subtype - - def user_setup(self, space, w_subtype): - self.space = space - self.w__class__ = w_subtype - self.user_setup_slots(w_subtype.layout.nslots) - - def user_setup_slots(self, nslots): - assert nslots == 0 - add(Proto) - - if "weakref" in features: - class Proto(object): - _lifeline_ = None - def getweakref(self): - return self._lifeline_ - def setweakref(self, space, weakreflifeline): - self._lifeline_ = weakreflifeline - def delweakref(self): - self._lifeline_ = None - add(Proto) - - if "del" in features: - parent_destructor = getattr(supercls, '__del__', None) + name += "Del" + parent_destructor = getattr(cls, '__del__', None) def call_parent_del(self): assert isinstance(self, subcls) parent_destructor(self) def call_applevel_del(self): assert isinstance(self, subcls) - self.space.userdel(self) + space.userdel(self) class Proto(object): def __del__(self): self.clear_all_weakrefs() - self.enqueue_for_destruction(self.space, call_applevel_del, + self.enqueue_for_destruction(space, call_applevel_del, 'method __del__ of ') if parent_destructor is not None: - self.enqueue_for_destruction(self.space, call_parent_del, + self.enqueue_for_destruction(space, call_parent_del, 'internal destructor of ') - add(Proto) + mixins_needed.append(Proto) - if "slots" in features: - class Proto(object): - slots_w = [] - def user_setup_slots(self, nslots): - if nslots > 0: - self.slots_w = [None] * nslots - def setslotvalue(self, index, w_value): - self.slots_w[index] = w_value - def delslotvalue(self, index): - if self.slots_w[index] is None: - return False - self.slots_w[index] = None - return True - def getslotvalue(self, index): - return self.slots_w[index] - add(Proto) - - if "dict" in features: - base_user_setup = supercls.user_setup.im_func - if "user_setup" in body: - base_user_setup = body["user_setup"] - 
class Proto(object): - def getdict(self, space): - return self.w__dict__ - - def setdict(self, space, w_dict): - self.w__dict__ = check_new_dictionary(space, w_dict) - - def user_setup(self, space, w_subtype): - self.w__dict__ = space.newdict( - instance=True) - base_user_setup(self, space, w_subtype) - - add(Proto) - - subcls = type(name, (supercls,), body) - _allusersubcls_cache[subcls] = True + class subcls(cls): + user_overridden_class = True + for base in mixins_needed: + objectmodel.import_from_mixin(base) + for copycls in copy_methods: + _copy_methods(copycls, subcls) + del subcls.base + subcls.__name__ = name return subcls -# a couple of helpers for the Proto classes above, factored out to reduce -# the translated code size -def check_new_dictionary(space, w_dict): - if not space.isinstance_w(w_dict, space.w_dict): - raise OperationError(space.w_TypeError, - space.wrap("setting dictionary to a non-dict")) - from pypy.objspace.std import dictmultiobject - assert isinstance(w_dict, dictmultiobject.W_DictMultiObject) - return w_dict -check_new_dictionary._dont_inline_ = True +def _copy_methods(copycls, subcls): + for key, value in copycls.__dict__.items(): + if (not key.startswith('__') or key == '__del__'): + setattr(subcls, key, value) + # ____________________________________________________________ diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -87,7 +87,7 @@ howmany = get_len_of_range(space, start, stop, step) - if space.config.objspace.std.withrangelist: + if space.config.objspace.std.withliststrategies: return range_withspecialized_implementation(space, start, step, howmany) res_w = [None] * howmany @@ -99,7 +99,7 @@ def range_withspecialized_implementation(space, start, step, length): - assert space.config.objspace.std.withrangelist + assert space.config.objspace.std.withliststrategies from pypy.objspace.std.listobject 
import make_range_list return make_range_list(space, start, step, length) diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -185,12 +185,19 @@ class Cache: def __init__(self, space): - from pypy.interpreter.typedef import _usersubclswithfeature - # evil - self.cls_without_del = _usersubclswithfeature( - space.config, W_InstanceObject, "dict", "weakref") - self.cls_with_del = _usersubclswithfeature( - space.config, self.cls_without_del, "del") + from pypy.interpreter.typedef import _getusercls + + if hasattr(space, 'is_fake_objspace'): + # hack: with the fake objspace, we don't want to see typedef's + # _getusercls() at all + self.cls_without_del = W_InstanceObject + self.cls_with_del = W_InstanceObject + return + + self.cls_without_del = _getusercls( + space, W_InstanceObject, False, reallywantdict=True) + self.cls_with_del = _getusercls( + space, W_InstanceObject, True, reallywantdict=True) def class_descr_call(space, w_self, __args__): diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -748,10 +748,6 @@ raises(TypeError, delattr, A(), 42) -class AppTestGetattrWithGetAttributeShortcut(AppTestGetattr): - spaceconfig = {"objspace.std.getattributeshortcut": True} - - class TestInternal: def test_execfile(self, space): fn = str(udir.join('test_execfile')) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1118,8 +1118,7 @@ assert getattr(c, u"x") == 1 -class AppTestOldStyleMapDict(AppTestOldstyle): - spaceconfig = {"objspace.std.withmapdict": True} +class AppTestOldStyleMapDict: def setup_class(cls): 
if cls.runappdirect: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -110,9 +110,8 @@ 'interp_magic.method_cache_counter') self.extra_interpdef('reset_method_cache_counter', 'interp_magic.reset_method_cache_counter') - if self.space.config.objspace.std.withmapdict: - self.extra_interpdef('mapdict_cache_counter', - 'interp_magic.mapdict_cache_counter') + self.extra_interpdef('mapdict_cache_counter', + 'interp_magic.mapdict_cache_counter') PYC_MAGIC = get_pyc_magic(self.space) self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC) try: diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -37,17 +37,15 @@ cache = space.fromcache(MethodCache) cache.misses = {} cache.hits = {} - if space.config.objspace.std.withmapdict: - cache = space.fromcache(MapAttrCache) - cache.misses = {} - cache.hits = {} + cache = space.fromcache(MapAttrCache) + cache.misses = {} + cache.hits = {} @unwrap_spec(name=str) def mapdict_cache_counter(space, name): """Return a tuple (index_cache_hits, index_cache_misses) for lookups in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter - assert space.config.objspace.std.withmapdict cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -1,8 +1,7 @@ import py class AppTest(object): - spaceconfig = {"objspace.usemodules.select": False, - "objspace.std.withrangelist": True} + spaceconfig = {"objspace.usemodules.select": False} def setup_class(cls): if cls.runappdirect: @@ 
-61,6 +60,7 @@ import __pypy__ import sys + result = [False] @__pypy__.hidden_applevel def test_hidden_with_tb(): def not_hidden(): 1/0 @@ -69,9 +69,11 @@ assert sys.exc_info() == (None, None, None) tb = __pypy__.get_hidden_tb() assert tb.tb_frame.f_code.co_name == 'not_hidden' - return True + result[0] = True + raise else: return False - assert test_hidden_with_tb() + raises(ZeroDivisionError, test_hidden_with_tb) + assert result[0] def test_lookup_special(self): from __pypy__ import lookup_special diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,6 +37,8 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib import rawrefcount +from rpython.rlib import rthread +from rpython.rlib.debug import fatalerror_notb DEBUG_WRAPPER = True @@ -85,11 +87,13 @@ FILEP = rffi.COpaquePtr('FILE') if sys.platform == 'win32': - fileno = rffi.llexternal('_fileno', [FILEP], rffi.INT) + dash = '_' else: - fileno = rffi.llexternal('fileno', [FILEP], rffi.INT) - + dash = '' +fileno = rffi.llexternal(dash + 'fileno', [FILEP], rffi.INT) fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP) +fdopen = rffi.llexternal(dash + 'fdopen', [rffi.INT, CONST_STRING], + FILEP, save_err=rffi.RFFI_SAVE_ERRNO) _fclose = rffi.llexternal('fclose', [FILEP], rffi.INT) def fclose(fp): @@ -119,16 +123,18 @@ def is_valid_fp(fp): return is_valid_fd(fileno(fp)) +pypy_decl = 'pypy_decl.h' + constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER -METH_COEXIST METH_STATIC METH_CLASS +METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) 
-udir.join('pypy_decl.h').write("/* Will be filled later */\n") +udir.join(pypy_decl).write("/* Will be filled later */\n") udir.join('pypy_structmember_decl.h').write("/* Will be filled later */\n") udir.join('pypy_macros.h').write("/* Will be filled later */\n") globals().update(rffi_platform.configure(CConfig_constants)) @@ -144,7 +150,7 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir): +def copy_header_files(dstdir, copy_numpy_headers): # XXX: 20 lines of code to recursively copy a directory, really?? assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') @@ -152,6 +158,18 @@ headers.append(udir.join(name)) _copy_header_files(headers, dstdir) + if copy_numpy_headers: + try: + dstdir.mkdir('numpy') + except py.error.EEXIST: + pass + numpy_dstdir = dstdir / 'numpy' + + numpy_include_dir = include_dir / 'numpy' + numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') + _copy_header_files(numpy_headers, numpy_dstdir) + + class NotSpecified(object): pass _NOT_SPECIFIED = NotSpecified() @@ -177,6 +195,61 @@ # exceptions generate a OperationError(w_SystemError); and the funtion returns # the error value specifed in the API. # +# Handling of the GIL +# ------------------- +# +# We add a global variable 'cpyext_glob_tid' that contains a thread +# id. Invariant: this variable always contain 0 when the PyPy GIL is +# released. It should also contain 0 when regular RPython code +# executes. In non-cpyext-related code, it will thus always be 0. +# +# **make_generic_cpy_call():** RPython to C, with the GIL held. Before +# the call, must assert that the global variable is 0 and set the +# current thread identifier into the global variable. After the call, +# assert that the global variable still contains the current thread id, +# and reset it to 0. 
+# +# **make_wrapper():** C to RPython; by default assume that the GIL is +# held, but accepts gil="acquire", "release", "around", +# "pygilstate_ensure", "pygilstate_release". +# +# When a wrapper() is called: +# +# * "acquire": assert that the GIL is not currently held, i.e. the +# global variable does not contain the current thread id (otherwise, +# deadlock!). Acquire the PyPy GIL. After we acquired it, assert +# that the global variable is 0 (it must be 0 according to the +# invariant that it was 0 immediately before we acquired the GIL, +# because the GIL was released at that point). +# +# * gil=None: we hold the GIL already. Assert that the current thread +# identifier is in the global variable, and replace it with 0. +# +# * "pygilstate_ensure": if the global variable contains the current +# thread id, replace it with 0 and set the extra arg to 0. Otherwise, +# do the "acquire" and set the extra arg to 1. Then we'll call +# pystate.py:PyGILState_Ensure() with this extra arg, which will do +# the rest of the logic. +# +# When a wrapper() returns, first assert that the global variable is +# still 0, and then: +# +# * "release": release the PyPy GIL. The global variable was 0 up to +# and including at the point where we released the GIL, but afterwards +# it is possible that the GIL is acquired by a different thread very +# quickly. +# +# * gil=None: we keep holding the GIL. Set the current thread +# identifier into the global variable. +# +# * "pygilstate_release": if the argument is PyGILState_UNLOCKED, +# release the PyPy GIL; otherwise, set the current thread identifier +# into the global variable. The rest of the logic of +# PyGILState_Release() should be done before, in pystate.py. 
+ +cpyext_glob_tid_ptr = lltype.malloc(rffi.CArray(lltype.Signed), 1, + flavor='raw', immortal=True, zero=True) + cpyext_namespace = NameManager('cpyext_') @@ -196,6 +269,9 @@ argnames, varargname, kwargname = pycode.cpython_code_signature(callable.func_code) assert argnames[0] == 'space' + if gil == 'pygilstate_ensure': + assert argnames[-1] == 'previous_state' + del argnames[-1] self.argnames = argnames[1:] assert len(self.argnames) == len(self.argtypes) self.gil = gil @@ -414,15 +490,14 @@ 'PyThread_acquire_lock', 'PyThread_release_lock', 'PyThread_create_key', 'PyThread_delete_key', 'PyThread_set_key_value', 'PyThread_get_key_value', 'PyThread_delete_key_value', - 'PyThread_ReInitTLS', + 'PyThread_ReInitTLS', 'PyThread_init_thread', + 'PyThread_start_new_thread', 'PyStructSequence_InitType', 'PyStructSequence_New', 'PyStructSequence_UnnamedField', 'PyFunction_Type', 'PyMethod_Type', 'PyRange_Type', 'PyTraceBack_Type', - 'PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS', '_PyArray_CopyInto', - 'Py_DebugFlag', 'Py_VerboseFlag', 'Py_InteractiveFlag', 'Py_InspectFlag', 'Py_OptimizeFlag', 'Py_NoSiteFlag', 'Py_BytesWarningFlag', 'Py_UseClassExceptionsFlag', 'Py_FrozenFlag', 'Py_TabcheckFlag', 'Py_UnicodeFlag', 'Py_IgnoreEnvironmentFlag', @@ -431,11 +506,11 @@ ] TYPES = {} GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur - '_Py_NoneStruct#': ('PyObject*', 'space.w_None'), - '_Py_TrueStruct#': ('PyIntObject*', 'space.w_True'), - '_Py_ZeroStruct#': ('PyIntObject*', 'space.w_False'), - '_Py_NotImplementedStruct#': ('PyObject*', 'space.w_NotImplemented'), - '_Py_EllipsisObject#': ('PyObject*', 'space.w_Ellipsis'), + '_Py_NoneStruct#%s' % pypy_decl: ('PyObject*', 'space.w_None'), + '_Py_TrueStruct#%s' % pypy_decl: ('PyIntObject*', 'space.w_True'), + '_Py_ZeroStruct#%s' % pypy_decl: ('PyIntObject*', 'space.w_False'), + '_Py_NotImplementedStruct#%s' % pypy_decl: ('PyObject*', 'space.w_NotImplemented'), + '_Py_EllipsisObject#%s' % 
pypy_decl: ('PyObject*', 'space.w_Ellipsis'), 'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'), } FORWARD_DECLS = [] @@ -461,6 +536,7 @@ "PyUnicode_Type": "space.w_unicode", "PyBaseString_Type": "space.w_basestring", "PyDict_Type": "space.w_dict", + "PyDictProxy_Type": "cpyext.dictobject.make_frozendict(space)", "PyTuple_Type": "space.w_tuple", "PyList_Type": "space.w_list", "PySet_Type": "space.w_set", @@ -484,7 +560,7 @@ 'PyCFunction_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)', 'PyWrapperDescr_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)' }.items(): - GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) + GLOBALS['%s#%s' % (cpyname, pypy_decl)] = ('PyTypeObject*', pypyexpr) for cpyname in '''PyMethodObject PyListObject PyLongObject PyDictObject PyClassObject'''.split(): @@ -602,7 +678,14 @@ fatal_value = callable.api_func.restype._defl() gil_acquire = (gil == "acquire" or gil == "around") gil_release = (gil == "release" or gil == "around") - assert gil is None or gil_acquire or gil_release + pygilstate_ensure = (gil == "pygilstate_ensure") + pygilstate_release = (gil == "pygilstate_release") + assert (gil is None or gil_acquire or gil_release + or pygilstate_ensure or pygilstate_release) + deadlock_error = ("GIL deadlock detected when a CPython C extension " + "module calls %r" % (callable.__name__,)) + no_gil_error = ("GIL not held when a CPython C extension " + "module calls %r" % (callable.__name__,)) @specialize.ll() def wrapper(*args): @@ -610,8 +693,27 @@ from pypy.module.cpyext.pyobject import as_pyobj # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer + + # see "Handling of the GIL" above (careful, we don't have the GIL here) + tid = rthread.get_or_make_ident() if gil_acquire: + if cpyext_glob_tid_ptr[0] == tid: + fatalerror_notb(deadlock_error) rgil.acquire() + assert cpyext_glob_tid_ptr[0] == 0 + elif pygilstate_ensure: + from 
pypy.module.cpyext import pystate + if cpyext_glob_tid_ptr[0] == tid: + cpyext_glob_tid_ptr[0] = 0 + args += (pystate.PyGILState_LOCKED,) + else: + rgil.acquire() + args += (pystate.PyGILState_UNLOCKED,) + else: + if cpyext_glob_tid_ptr[0] != tid: + fatalerror_notb(no_gil_error) + cpyext_glob_tid_ptr[0] = 0 + rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value @@ -620,7 +722,8 @@ try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, - assert len(args) == len(callable.api_func.argtypes) + assert len(args) == (len(callable.api_func.argtypes) + + pygilstate_ensure) for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if is_PyObject(typ) and is_wrapped: @@ -629,6 +732,8 @@ else: arg_conv = arg boxed_args += (arg_conv, ) + if pygilstate_ensure: + boxed_args += (args[-1], ) state = space.fromcache(State) try: result = callable(space, *boxed_args) @@ -688,8 +793,20 @@ pypy_debug_catch_fatal_exception() assert False rffi.stackcounter.stacks_counter -= 1 - if gil_release: + + # see "Handling of the GIL" above + assert cpyext_glob_tid_ptr[0] == 0 + if pygilstate_release: + from pypy.module.cpyext import pystate + arg = rffi.cast(lltype.Signed, args[-1]) + unlock = (arg == pystate.PyGILState_UNLOCKED) + else: + unlock = gil_release + if unlock: rgil.release() + else: + cpyext_glob_tid_ptr[0] = tid + return retval callable._always_inline_ = 'try' wrapper.__name__ = "wrapper for %r" % (callable, ) @@ -782,6 +899,9 @@ structindex = {} for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): + if not func: + # added only for the macro, not the decl + continue restype, args = c_function_signature(db, func) members.append('%s (*%s)(%s);' % (restype, name, args)) structindex[name] = len(structindex) @@ -798,7 +918,7 @@ global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): - if "#" in name: + if '#' in name: 
continue if typ == 'PyDateTime_CAPI*': continue @@ -822,7 +942,7 @@ '\n' + '\n'.join(functions)) - eci = build_eci(True, export_symbols, code) + eci = build_eci(True, export_symbols, code, use_micronumpy) eci = eci.compile_shared_lib( outputfilename=str(udir / "module_cache" / "pypyapi")) modulename = py.path.local(eci.libraries[-1]) @@ -834,7 +954,7 @@ ob = rawrefcount.next_dead(PyObject) if not ob: break - print ob + print 'deallocating PyObject', ob decref(space, ob) print 'dealloc_trigger DONE' return "RETRY" @@ -853,8 +973,8 @@ for name, (typ, expr) in GLOBALS.iteritems(): from pypy.module import cpyext # for the eval() below w_obj = eval(expr) - if name.endswith('#'): - name = name[:-1] + if '#' in name: + name = name.split('#')[0] isptr = False else: isptr = True @@ -899,7 +1019,7 @@ # ctypes.c_void_p) for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): - if name.startswith('cpyext_'): # XXX hack + if name.startswith('cpyext_') or func is None: # XXX hack continue pypyAPI[structindex[name]] = ctypes.cast( ll2ctypes.lltype2ctypes(func.get_llhelper(space)), @@ -952,6 +1072,8 @@ cpyext_type_init = self.cpyext_type_init self.cpyext_type_init = None for pto, w_type in cpyext_type_init: + if space.is_w(w_type, space.w_str): + pto.c_tp_itemsize = 1 finish_type_1(space, pto) finish_type_2(space, pto, w_type) @@ -969,10 +1091,14 @@ pypy_macros = [] renamed_symbols = [] for name in export_symbols: - name = name.replace("#", "") + if '#' in name: + name,header = name.split('#') + else: + header = pypy_decl newname = mangle_name(prefix, name) assert newname, name - pypy_macros.append('#define %s %s' % (name, newname)) + if header == pypy_decl: + pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) renamed_symbols.append(newname) @@ -1001,7 +1127,7 @@ # implement function callbacks and generate function decls functions = [] 
decls = {} - pypy_decls = decls['pypy_decl.h'] = [] + pypy_decls = decls[pypy_decl] = [] pypy_decls.append('#define Signed long /* xxx temporary fix */\n') pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n') @@ -1017,6 +1143,8 @@ header = decls[header_name] for name, func in sorted(header_functions.iteritems()): + if not func: + continue if header == DEFAULT_HEADER: _name = name else: @@ -1042,12 +1170,15 @@ functions.append(header + '\n{return va_arg(*vp, %s);}\n' % name) for name, (typ, expr) in GLOBALS.iteritems(): - if name.endswith('#'): - name = name.replace("#", "") + if '#' in name: + name, header = name.split("#") typ = typ.replace("*", "") elif name.startswith('PyExc_'): typ = 'PyObject*' - pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name)) + header = pypy_decl + if header != pypy_decl: + decls[header].append('#define %s %s' % (name, mangle_name(prefix, name))) + decls[header].append('PyAPI_DATA(%s) %s;' % (typ, name)) for header_name in FUNCTIONS_BY_HEADER.keys(): header = decls[header_name] @@ -1075,9 +1206,10 @@ source_dir / "pysignals.c", source_dir / "pythread.c", source_dir / "missing.c", + source_dir / "pymem.c", ] -def build_eci(building_bridge, export_symbols, code): +def build_eci(building_bridge, export_symbols, code, use_micronumpy=False): "NOT_RPYTHON" # Build code and get pointer to the structure kwds = {} @@ -1099,9 +1231,11 @@ # Generate definitions for global structures structs = ["#include "] + if use_micronumpy: + structs.append('#include /* api.py line 1223 */') for name, (typ, expr) in GLOBALS.iteritems(): - if name.endswith('#'): - structs.append('%s %s;' % (typ[:-1], name[:-1])) + if '#' in name: + structs.append('%s %s;' % (typ[:-1], name.split('#')[0])) elif name.startswith('PyExc_'): structs.append('PyTypeObject _%s;' % (name,)) structs.append('PyObject* %s = (PyObject*)&_%s;' % (name, name)) @@ -1142,11 +1276,12 @@ use_micronumpy = space.config.objspace.usemodules.micronumpy if not use_micronumpy: 
return use_micronumpy - # import to register api functions by side-effect - import pypy.module.cpyext.ndarrayobject - global GLOBALS, SYMBOLS_C, separate_module_files - GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") - SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS'] + # import registers api functions by side-effect, we also need HEADER + from pypy.module.cpyext.ndarrayobject import HEADER + global GLOBALS, FUNCTIONS_BY_HEADER, separate_module_files + for func_name in ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS']: + FUNCTIONS_BY_HEADER.setdefault(HEADER, {})[func_name] = None + GLOBALS["PyArray_Type#%s" % HEADER] = ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") separate_module_files.append(source_dir / "ndarrayobject.c") return use_micronumpy @@ -1156,14 +1291,18 @@ export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() + prefix = 'PyPy' - generate_macros(export_symbols, prefix='PyPy') + generate_macros(export_symbols, prefix=prefix) functions = generate_decls_and_callbacks(db, [], api_struct=False, - prefix='PyPy') - code = "#include \n" + "\n".join(functions) + prefix=prefix) + code = "#include \n" + if use_micronumpy: + code += "#include /* api.py line 1290 */" + code += "\n".join(functions) - eci = build_eci(False, export_symbols, code) + eci = build_eci(False, export_symbols, code, use_micronumpy) space.fromcache(State).install_dll(eci) @@ -1175,9 +1314,14 @@ lines = ['PyObject *pypy_static_pyobjs[] = {\n'] include_lines = ['RPY_EXTERN PyObject *pypy_static_pyobjs[];\n'] for name, (typ, expr) in sorted(GLOBALS.items()): - if name.endswith('#'): + if '#' in name: + name, header = name.split('#') assert typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*') - typ, name = typ[:-1], name[:-1] + typ = typ[:-1] + if header != pypy_decl: + # since the #define is not in pypy_macros, 
do it here + mname = mangle_name(prefix, name) + include_lines.append('#define %s %s\n' % (name, mname)) elif name.startswith('PyExc_'): typ = 'PyTypeObject' name = '_' + name @@ -1204,6 +1348,8 @@ for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): + if not func: + continue newname = mangle_name('PyPy', name) or name deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) @@ -1211,7 +1357,7 @@ setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include) + copy_header_files(trunk_include, use_micronumpy) def init_static_data_translated(space): builder = space.fromcache(StaticObjectBuilder) @@ -1348,10 +1494,17 @@ arg = as_pyobj(space, arg) boxed_args += (arg,) + # see "Handling of the GIL" above + tid = rthread.get_ident() + assert cpyext_glob_tid_ptr[0] == 0 + cpyext_glob_tid_ptr[0] = tid + try: # Call the function result = call_external_function(func, *boxed_args) finally: + assert cpyext_glob_tid_ptr[0] == tid + cpyext_glob_tid_ptr[0] = 0 keepalive_until_here(*keepalives) if is_PyObject(RESULT_TYPE): diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -2,11 +2,11 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, - PyObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) + PyObjectFields, PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) from pypy.module.cpyext.pyerrors import PyErr_BadArgument from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, - make_typedescr, get_typedescr) + make_typedescr, get_typedescr, as_pyobj, Py_IncRef) ## ## Implementation of PyStringObject @@ -27,7 +27,7 @@ ## Solution ## -------- ## -## PyStringObject 
contains two additional members: the size and a pointer to a +## PyStringObject contains two additional members: the ob_size and a pointer to a ## char buffer; it may be NULL. ## ## - A string allocated by pypy will be converted into a PyStringObject with a @@ -36,7 +36,7 @@ ## ## - A string allocated with PyString_FromStringAndSize(NULL, size) will ## allocate a PyStringObject structure, and a buffer with the specified -## size, but the reference won't be stored in the global map; there is no +## size+1, but the reference won't be stored in the global map; there is no ## corresponding object in pypy. When from_ref() or Py_INCREF() is called, ## the pypy string is created, and added to the global map of tracked ## objects. The buffer is then supposed to be immutable. @@ -52,8 +52,8 @@ PyStringObjectStruct = lltype.ForwardReference() PyStringObject = lltype.Ptr(PyStringObjectStruct) -PyStringObjectFields = PyObjectFields + \ - (("buffer", rffi.CCHARP), ("size", Py_ssize_t)) +PyStringObjectFields = PyVarObjectFields + \ + (("ob_shash", rffi.LONG), ("ob_sstate", rffi.INT), ("buffer", rffi.CCHARP)) cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct) @bootstrap_function @@ -78,10 +78,11 @@ py_str = rffi.cast(PyStringObject, py_obj) buflen = length + 1 - py_str.c_size = length + py_str.c_ob_size = length py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw', zero=True, add_memory_pressure=True) + py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED return py_str def string_attach(space, py_obj, w_obj): @@ -90,8 +91,10 @@ buffer must not be modified. 
""" py_str = rffi.cast(PyStringObject, py_obj) - py_str.c_size = len(space.str_w(w_obj)) + py_str.c_ob_size = len(space.str_w(w_obj)) py_str.c_buffer = lltype.nullptr(rffi.CCHARP.TO) + py_str.c_ob_shash = space.hash_w(w_obj) + py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL def string_realize(space, py_obj): """ @@ -99,8 +102,13 @@ be modified after this call. """ py_str = rffi.cast(PyStringObject, py_obj) - s = rffi.charpsize2str(py_str.c_buffer, py_str.c_size) + if not py_str.c_buffer: + py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, py_str.c_ob_size + 1, + flavor='raw', zero=True) + s = rffi.charpsize2str(py_str.c_buffer, py_str.c_ob_size) w_obj = space.wrap(s) + py_str.c_ob_shash = space.hash_w(w_obj) + py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL track_reference(space, py_obj, w_obj) return w_obj @@ -169,12 +177,12 @@ ref_str.c_buffer = rffi.str2charp(s) buffer[0] = ref_str.c_buffer if length: - length[0] = ref_str.c_size + length[0] = ref_str.c_ob_size else: i = 0 while ref_str.c_buffer[i] != '\0': i += 1 - if i != ref_str.c_size: + if i != ref_str.c_ob_size: raise OperationError(space.w_TypeError, space.wrap( "expected string without null bytes")) return 0 @@ -183,7 +191,7 @@ def PyString_Size(space, ref): if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: ref = rffi.cast(PyStringObject, ref) - return ref.c_size + return ref.c_ob_size else: w_obj = from_ref(space, ref) return space.len_w(w_obj) @@ -212,7 +220,7 @@ ref[0] = lltype.nullptr(PyObject.TO) raise to_cp = newsize - oldsize = py_str.c_size + oldsize = py_str.c_ob_size if oldsize < newsize: to_cp = oldsize for i in range(to_cp): @@ -236,15 +244,16 @@ if not ref[0]: return - if w_newpart is None or not PyString_Check(space, ref[0]) or \ - not PyString_Check(space, w_newpart): + if w_newpart is None or not PyString_Check(space, ref[0]) or not \ + (space.isinstance_w(w_newpart, space.w_str) or + space.isinstance_w(w_newpart, 
space.w_unicode)): Py_DecRef(space, ref[0]) ref[0] = lltype.nullptr(PyObject.TO) return w_str = from_ref(space, ref[0]) w_newstr = space.add(w_str, w_newpart) - Py_DecRef(space, ref[0]) ref[0] = make_ref(space, w_newstr) + Py_IncRef(space, ref[0]) @cpython_api([PyObjectP, PyObject], lltype.Void) def PyString_ConcatAndDel(space, ref, newpart): diff --git a/pypy/module/cpyext/cdatetime.py b/pypy/module/cpyext/cdatetime.py --- a/pypy/module/cpyext/cdatetime.py +++ b/pypy/module/cpyext/cdatetime.py @@ -15,6 +15,7 @@ ('DateTimeType', PyTypeObjectPtr), ('TimeType', PyTypeObjectPtr), ('DeltaType', PyTypeObjectPtr), + ('TZInfoType', PyTypeObjectPtr), )) @cpython_api([], lltype.Ptr(PyDateTime_CAPI)) @@ -40,11 +41,21 @@ datetimeAPI.c_DeltaType = rffi.cast( PyTypeObjectPtr, make_ref(space, w_type)) + w_type = space.getattr(w_datetime, space.wrap("tzinfo")) + datetimeAPI.c_TZInfoType = rffi.cast( + PyTypeObjectPtr, make_ref(space, w_type)) + return datetimeAPI -PyDateTime_Date = PyObject -PyDateTime_Time = PyObject -PyDateTime_DateTime = PyObject +PyDateTime_DateStruct = lltype.ForwardReference() +PyDateTime_TimeStruct = lltype.ForwardReference() +PyDateTime_DateTimeStruct = lltype.ForwardReference() +cpython_struct("PyDateTime_Date", PyObjectFields, PyDateTime_DateStruct) +PyDateTime_Date = lltype.Ptr(PyDateTime_DateStruct) +cpython_struct("PyDateTime_Time", PyObjectFields, PyDateTime_TimeStruct) +PyDateTime_Time = lltype.Ptr(PyDateTime_TimeStruct) +cpython_struct("PyDateTime_DateTime", PyObjectFields, PyDateTime_DateTimeStruct) +PyDateTime_DateTime = lltype.Ptr(PyDateTime_DateTimeStruct) PyDeltaObjectStruct = lltype.ForwardReference() cpython_struct("PyDateTime_Delta", PyObjectFields, PyDeltaObjectStruct) @@ -81,6 +92,7 @@ make_check_function("PyDate_Check", "date") make_check_function("PyTime_Check", "time") make_check_function("PyDelta_Check", "timedelta") +make_check_function("PyTZInfo_Check", "tzinfo") # Constructors diff --git a/pypy/module/cpyext/complexobject.py 
b/pypy/module/cpyext/complexobject.py --- a/pypy/module/cpyext/complexobject.py +++ b/pypy/module/cpyext/complexobject.py From pypy.commits at gmail.com Sat Apr 30 22:51:13 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 30 Apr 2016 19:51:13 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: shorten Message-ID: <57256f21.81da1c0a.db864.ffffa0b6@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r5641:8a43ad85184c Date: 2016-05-01 05:50 +0300 http://bitbucket.org/pypy/extradoc/changeset/8a43ad85184c/ Log: shorten diff --git a/talk/pycon-il-2016/talk.pdf b/talk/pycon-il-2016/talk.pdf index 51c1443ef77403e7affefca9ed3b4d748cf0c7c2..c46cf494a4303e80ce9d2fe2f79795fd8e25118e GIT binary patch [cut] diff --git a/talk/pycon-il-2016/talk.rst b/talk/pycon-il-2016/talk.rst --- a/talk/pycon-il-2016/talk.rst +++ b/talk/pycon-il-2016/talk.rst @@ -107,8 +107,8 @@ - Opportunity??? -PyPy and C (1/3) ----------------- +PyPy and Third-party libraries +------------------------------ * PyPy and CFFI (Armin Rigo, Maciej Fijałkowski) @@ -116,17 +116,14 @@ * Use CFFI to call python from C - - This means you can create your own C API in pure Python ! - -PyPy and C (2/3) ----------------- - * CFFI enables embedded Python (and PyPy) in a C application (uWSGI) * Very fast on PyPy, fast enough on CPython -PyPy and C (3/3) ----------------- +PyPy and Third-party libraries +------------------------------ + +* Not everyone will rewrite in CFFI * What about C-API (glad you asked) @@ -145,8 +142,10 @@ * Hint - good things are coming -NumPyPy -------- +PyPy and Third-party libriaries: NumPyPy +---------------------------------------- + +* Numpy (and its ecosystem) is the last frontier for PyPy * https://bitbucket.org/pypy/numpy + pypy @@ -179,81 +178,6 @@ * But what about SciPy? 
-PyMetabiosis ------------- - -* https://github.com/rguillebert/pymetabiosis - -* Proof of concept (Romain Guillebert) - -* Allows you to use any CPython module on PyPy (scipy for example) - -* Embeds CPython into PyPy with CFFI - -* Numpy arrays can be shared between PyPy and CPython - -PyMetabiosis ------------- - -|scriptsize| - -.. sourcecode:: python - - from pymetabiosis import import_module - - cpython_virtualenv_path = - "/tmp/venv/bin/activate_this.py" - - builtin = import_module("__builtin__") - - # Activate a virtualenv for the cpython interpreter - builtin.execfile(cpython_virtualenv_path, - {"__file__" : cpython_virtualenv_path} - ) - - pylab = import_module("matplotlib.pylab") - - pylab.plot([1, 2, 3, 4]) - pylab.show() - -|end_scriptsize| - -JitPy ------ - -* http://jitpy.readthedocs.io - -* Proof of concept (Maciej Fijałkowski) - -* Embeds PyPy into CPython - -* Provides a decorator that allows you to run specific functions on PyPy - -* Is used the same way as numba, but different performance characteristics - - -JitPy ------ - -|scriptsize| - -.. sourcecode:: python - - import numpy as np - from jitpy import setup - setup('') - from jitpy.wrapper import jittify - - @jittify(['array', float], float) - def f(a, s): - r = 0 - for i in xrange(a.shape[0]): - r += a[i] * s - return s - func(np.arange(10000), 1.2) - -|end_scriptsize| - Future - wouldn't it be great if -------------------------------- @@ -265,10 +189,14 @@ The Future is Now! ------------------ +* Merged major upgrade of the C-API + * (Applause) * Native numpy (tweaked) passes 90% of tests +* Download a nightly and try http://github.com/pypy/numpy + * How to leverage the JIT and NumPyPy? Why this makes sense @@ -294,8 +222,6 @@ Questions ? Examples: -* What about this other interpreter I heard of? - * How can I get involved? * What about commercial involvement? @@ -303,3 +229,6 @@ * How can I get support? * What about Python 3.5? + +* What about this other interpreter I heard of? +